diff -urNp linux-2.6.39.1/arch/alpha/include/asm/dma-mapping.h linux-2.6.39.1/arch/alpha/include/asm/dma-mapping.h --- linux-2.6.39.1/arch/alpha/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400 @@ -3,9 +3,9 @@ #include -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { return dma_ops; } diff -urNp linux-2.6.39.1/arch/alpha/include/asm/elf.h linux-2.6.39.1/arch/alpha/include/asm/elf.h --- linux-2.6.39.1/arch/alpha/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL) + +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28) +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19) +#endif + /* $0 is set by ld.so to a pointer to a function which might be registered using atexit. This provides a mean for the dynamic linker to call DT_FINI functions for shared libraries that have diff -urNp linux-2.6.39.1/arch/alpha/include/asm/pgtable.h linux-2.6.39.1/arch/alpha/include/asm/pgtable.h --- linux-2.6.39.1/arch/alpha/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/include/asm/pgtable.h 2011-05-22 19:36:30.000000000 -0400 @@ -101,6 +101,17 @@ struct vm_area_struct; #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) + +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE) +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) diff -urNp linux-2.6.39.1/arch/alpha/kernel/core_apecs.c linux-2.6.39.1/arch/alpha/kernel/core_apecs.c --- linux-2.6.39.1/arch/alpha/kernel/core_apecs.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/core_apecs.c 2011-05-22 19:36:30.000000000 -0400 @@ -305,7 +305,7 @@ apecs_write_config(struct pci_bus *bus, return PCIBIOS_SUCCESSFUL; } -struct pci_ops apecs_pci_ops = +const struct pci_ops apecs_pci_ops = { .read = apecs_read_config, .write = apecs_write_config, diff -urNp linux-2.6.39.1/arch/alpha/kernel/core_cia.c linux-2.6.39.1/arch/alpha/kernel/core_cia.c --- linux-2.6.39.1/arch/alpha/kernel/core_cia.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/core_cia.c 2011-05-22 19:36:30.000000000 -0400 @@ -239,7 +239,7 @@ cia_write_config(struct pci_bus *bus, un return PCIBIOS_SUCCESSFUL; } -struct pci_ops cia_pci_ops = +const struct pci_ops cia_pci_ops = { .read = cia_read_config, .write = cia_write_config, diff -urNp 
linux-2.6.39.1/arch/alpha/kernel/core_irongate.c linux-2.6.39.1/arch/alpha/kernel/core_irongate.c --- linux-2.6.39.1/arch/alpha/kernel/core_irongate.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/core_irongate.c 2011-05-22 19:36:30.000000000 -0400 @@ -155,7 +155,7 @@ irongate_write_config(struct pci_bus *bu return PCIBIOS_SUCCESSFUL; } -struct pci_ops irongate_pci_ops = +const struct pci_ops irongate_pci_ops = { .read = irongate_read_config, .write = irongate_write_config, diff -urNp linux-2.6.39.1/arch/alpha/kernel/core_lca.c linux-2.6.39.1/arch/alpha/kernel/core_lca.c --- linux-2.6.39.1/arch/alpha/kernel/core_lca.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/core_lca.c 2011-05-22 19:36:30.000000000 -0400 @@ -231,7 +231,7 @@ lca_write_config(struct pci_bus *bus, un return PCIBIOS_SUCCESSFUL; } -struct pci_ops lca_pci_ops = +const struct pci_ops lca_pci_ops = { .read = lca_read_config, .write = lca_write_config, diff -urNp linux-2.6.39.1/arch/alpha/kernel/core_marvel.c linux-2.6.39.1/arch/alpha/kernel/core_marvel.c --- linux-2.6.39.1/arch/alpha/kernel/core_marvel.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/core_marvel.c 2011-05-22 19:36:30.000000000 -0400 @@ -588,7 +588,7 @@ marvel_write_config(struct pci_bus *bus, return PCIBIOS_SUCCESSFUL; } -struct pci_ops marvel_pci_ops = +const struct pci_ops marvel_pci_ops = { .read = marvel_read_config, .write = marvel_write_config, diff -urNp linux-2.6.39.1/arch/alpha/kernel/core_mcpcia.c linux-2.6.39.1/arch/alpha/kernel/core_mcpcia.c --- linux-2.6.39.1/arch/alpha/kernel/core_mcpcia.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/core_mcpcia.c 2011-05-22 19:36:30.000000000 -0400 @@ -235,7 +235,7 @@ mcpcia_write_config(struct pci_bus *bus, return PCIBIOS_SUCCESSFUL; } -struct pci_ops mcpcia_pci_ops = +const struct pci_ops mcpcia_pci_ops = { .read = mcpcia_read_config, .write = mcpcia_write_config, diff -urNp linux-2.6.39.1/arch/alpha/kernel/core_polaris.c linux-2.6.39.1/arch/alpha/kernel/core_polaris.c --- linux-2.6.39.1/arch/alpha/kernel/core_polaris.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/core_polaris.c 2011-05-22 19:36:30.000000000 -0400 @@ -136,7 +136,7 @@ polaris_write_config(struct pci_bus *bus return PCIBIOS_SUCCESSFUL; } -struct pci_ops polaris_pci_ops = +const struct pci_ops polaris_pci_ops = { .read = polaris_read_config, .write = polaris_write_config, diff -urNp linux-2.6.39.1/arch/alpha/kernel/core_t2.c linux-2.6.39.1/arch/alpha/kernel/core_t2.c --- linux-2.6.39.1/arch/alpha/kernel/core_t2.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/core_t2.c 2011-05-22 19:36:30.000000000 -0400 @@ -314,7 +314,7 @@ t2_write_config(struct pci_bus *bus, uns return PCIBIOS_SUCCESSFUL; } -struct pci_ops t2_pci_ops = +const struct pci_ops t2_pci_ops = { .read = t2_read_config, .write = t2_write_config, diff -urNp linux-2.6.39.1/arch/alpha/kernel/core_titan.c linux-2.6.39.1/arch/alpha/kernel/core_titan.c --- linux-2.6.39.1/arch/alpha/kernel/core_titan.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/core_titan.c 2011-05-22 19:36:30.000000000 -0400 @@ -191,7 +191,7 @@ titan_write_config(struct pci_bus *bus, return PCIBIOS_SUCCESSFUL; } -struct pci_ops titan_pci_ops = +const struct pci_ops titan_pci_ops = { .read = titan_read_config, .write = titan_write_config, diff -urNp linux-2.6.39.1/arch/alpha/kernel/core_tsunami.c 
linux-2.6.39.1/arch/alpha/kernel/core_tsunami.c --- linux-2.6.39.1/arch/alpha/kernel/core_tsunami.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/core_tsunami.c 2011-05-22 19:36:30.000000000 -0400 @@ -166,7 +166,7 @@ tsunami_write_config(struct pci_bus *bus return PCIBIOS_SUCCESSFUL; } -struct pci_ops tsunami_pci_ops = +const struct pci_ops tsunami_pci_ops = { .read = tsunami_read_config, .write = tsunami_write_config, diff -urNp linux-2.6.39.1/arch/alpha/kernel/core_wildfire.c linux-2.6.39.1/arch/alpha/kernel/core_wildfire.c --- linux-2.6.39.1/arch/alpha/kernel/core_wildfire.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/core_wildfire.c 2011-05-22 19:36:30.000000000 -0400 @@ -431,7 +431,7 @@ wildfire_write_config(struct pci_bus *bu return PCIBIOS_SUCCESSFUL; } -struct pci_ops wildfire_pci_ops = +const struct pci_ops wildfire_pci_ops = { .read = wildfire_read_config, .write = wildfire_write_config, diff -urNp linux-2.6.39.1/arch/alpha/kernel/module.c linux-2.6.39.1/arch/alpha/kernel/module.c --- linux-2.6.39.1/arch/alpha/kernel/module.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/module.c 2011-05-22 19:36:30.000000000 -0400 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, /* The small sections were sorted to the end of the segment. The following should definitely cover them. */ - gp = (u64)me->module_core + me->core_size - 0x8000; + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000; got = sechdrs[me->arch.gotsecindex].sh_addr; for (i = 0; i < n; i++) { diff -urNp linux-2.6.39.1/arch/alpha/kernel/osf_sys.c linux-2.6.39.1/arch/alpha/kernel/osf_sys.c --- linux-2.6.39.1/arch/alpha/kernel/osf_sys.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:07.000000000 -0400 @@ -409,7 +409,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char return -EFAULT; len = namelen; - if (namelen > 32) + if (len > 32) len = 32; down_read(&uts_sem); @@ -594,7 +594,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, comman down_read(&uts_sem); res = sysinfo_table[offset]; len = strlen(res)+1; - if (len > count) + if ((unsigned long)len > (unsigned long)count) len = count; if (copy_to_user(buf, res, len)) err = -EFAULT; @@ -649,7 +649,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned return 1; case GSI_GET_HWRPB: - if (nbytes < sizeof(*hwrpb)) + if (nbytes > sizeof(*hwrpb)) return -EINVAL; if (copy_to_user(buffer, hwrpb, nbytes) != 0) return -EFAULT; @@ -1008,6 +1008,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i { struct rusage r; long ret, err; + unsigned int status = 0; mm_segment_t old_fs; if (!ur) @@ -1016,13 +1017,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i old_fs = get_fs(); set_fs (KERNEL_DS); - ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r); + ret = sys_wait4(pid, (unsigned int __user *) &status, options, + (struct rusage __user *) &r); set_fs (old_fs); if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) return -EFAULT; err = 0; + err |= put_user(status, ustatus); err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); @@ -1142,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a /* At this point: (!vma || addr < vma->vm_end). 
*/ if (limit - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) + if (check_heap_stack_gap(vma, addr, len)) return addr; addr = vma->vm_end; vma = vma->vm_next; @@ -1178,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? */ +#ifdef CONFIG_PAX_RANDMMAP + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); if (addr != (unsigned long) -ENOMEM) @@ -1185,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp } /* Next, try allocating at TASK_UNMAPPED_BASE. */ - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), - len, limit); + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit); + if (addr != (unsigned long) -ENOMEM) return addr; diff -urNp linux-2.6.39.1/arch/alpha/kernel/pci_iommu.c linux-2.6.39.1/arch/alpha/kernel/pci_iommu.c --- linux-2.6.39.1/arch/alpha/kernel/pci_iommu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/pci_iommu.c 2011-05-22 19:36:30.000000000 -0400 @@ -950,7 +950,7 @@ static int alpha_pci_set_mask(struct dev return 0; } -struct dma_map_ops alpha_pci_ops = { +const struct dma_map_ops alpha_pci_ops = { .alloc_coherent = alpha_pci_alloc_coherent, .free_coherent = alpha_pci_free_coherent, .map_page = alpha_pci_map_page, @@ -962,5 +962,5 @@ struct dma_map_ops alpha_pci_ops = { .set_dma_mask = alpha_pci_set_mask, }; -struct dma_map_ops *dma_ops = &alpha_pci_ops; +const struct dma_map_ops *dma_ops = &alpha_pci_ops; EXPORT_SYMBOL(dma_ops); diff -urNp linux-2.6.39.1/arch/alpha/kernel/pci-noop.c linux-2.6.39.1/arch/alpha/kernel/pci-noop.c --- linux-2.6.39.1/arch/alpha/kernel/pci-noop.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/pci-noop.c 2011-05-22 19:36:30.000000000 -0400 @@ -173,7 +173,7 @@ static int alpha_noop_set_mask(struct de return 0; } -struct dma_map_ops alpha_noop_ops = { +const struct dma_map_ops alpha_noop_ops = { .alloc_coherent = alpha_noop_alloc_coherent, .free_coherent = alpha_noop_free_coherent, .map_page = alpha_noop_map_page, @@ -183,7 +183,7 @@ struct dma_map_ops alpha_noop_ops = { .set_dma_mask = alpha_noop_set_mask, }; -struct dma_map_ops *dma_ops = &alpha_noop_ops; +const struct dma_map_ops *dma_ops = &alpha_noop_ops; EXPORT_SYMBOL(dma_ops); void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) diff -urNp linux-2.6.39.1/arch/alpha/kernel/proto.h linux-2.6.39.1/arch/alpha/kernel/proto.h --- linux-2.6.39.1/arch/alpha/kernel/proto.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/alpha/kernel/proto.h 2011-05-22 19:36:30.000000000 -0400 @@ -17,14 +17,14 @@ struct pci_dev; struct pci_controller; /* core_apecs.c */ -extern struct pci_ops apecs_pci_ops; +extern const struct pci_ops apecs_pci_ops; extern void apecs_init_arch(void); extern void apecs_pci_clr_err(void); extern void apecs_machine_check(unsigned long vector, unsigned long la_ptr); extern void apecs_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_cia.c */ -extern struct pci_ops cia_pci_ops; +extern const struct pci_ops cia_pci_ops; extern void cia_init_pci(void); extern void cia_init_arch(void); extern void pyxis_init_arch(void); @@ -33,19 +33,19 @@ extern void cia_machine_check(unsigned l extern void cia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); /* core_irongate.c */ -extern struct pci_ops irongate_pci_ops; +extern const 
struct pci_ops irongate_pci_ops;
 extern int irongate_pci_clr_err(void);
 extern void irongate_init_arch(void);
 #define irongate_pci_tbi ((void *)0)
 /* core_lca.c */
-extern struct pci_ops lca_pci_ops;
+extern const struct pci_ops lca_pci_ops;
 extern void lca_init_arch(void);
 extern void lca_machine_check(unsigned long vector, unsigned long la_ptr);
 extern void lca_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
 /* core_marvel.c */
-extern struct pci_ops marvel_pci_ops;
+extern const struct pci_ops marvel_pci_ops;
 extern void marvel_init_arch(void);
 extern void marvel_kill_arch(int);
 extern void marvel_machine_check(unsigned long, unsigned long);
@@ -60,14 +60,14 @@ struct io7 *marvel_next_io7(struct io7 *
 void io7_clear_errors(struct io7 *io7);
 /* core_mcpcia.c */
-extern struct pci_ops mcpcia_pci_ops;
+extern const struct pci_ops mcpcia_pci_ops;
 extern void mcpcia_init_arch(void);
 extern void mcpcia_init_hoses(void);
 extern void mcpcia_machine_check(unsigned long vector, unsigned long la_ptr);
 extern void mcpcia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
 /* core_polaris.c */
-extern struct pci_ops polaris_pci_ops;
+extern const struct pci_ops polaris_pci_ops;
 extern int polaris_read_config_dword(struct pci_dev *, int, u32 *);
 extern int polaris_write_config_dword(struct pci_dev *, int, u32);
 extern void polaris_init_arch(void);
@@ -75,14 +75,14 @@ extern void polaris_machine_check(unsign
 #define polaris_pci_tbi ((void *)0)
 /* core_t2.c */
-extern struct pci_ops t2_pci_ops;
+extern const struct pci_ops t2_pci_ops;
 extern void t2_init_arch(void);
 extern void t2_kill_arch(int);
 extern void t2_machine_check(unsigned long vector, unsigned long la_ptr);
 extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
 /* core_titan.c */
-extern struct pci_ops titan_pci_ops;
+extern const struct pci_ops titan_pci_ops;
 extern void titan_init_arch(void);
 extern void titan_kill_arch(int);
 extern void titan_machine_check(unsigned long, unsigned long);
@@ -90,14 +90,14 @@ extern void titan_pci_tbi(struct pci_con
 extern struct _alpha_agp_info *titan_agp_info(void);
 /* core_tsunami.c */
-extern struct pci_ops tsunami_pci_ops;
+extern const struct pci_ops tsunami_pci_ops;
 extern void tsunami_init_arch(void);
 extern void tsunami_kill_arch(int);
 extern void tsunami_machine_check(unsigned long vector, unsigned long la_ptr);
 extern void tsunami_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
 /* core_wildfire.c */
-extern struct pci_ops wildfire_pci_ops;
+extern const struct pci_ops wildfire_pci_ops;
 extern void wildfire_init_arch(void);
 extern void wildfire_kill_arch(int);
 extern void wildfire_machine_check(unsigned long vector, unsigned long la_ptr);
diff -urNp linux-2.6.39.1/arch/alpha/mm/fault.c linux-2.6.39.1/arch/alpha/mm/fault.c
--- linux-2.6.39.1/arch/alpha/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
+++ linux-2.6.39.1/arch/alpha/mm/fault.c 2011-05-22 19:36:30.000000000 -0400
@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
 	__reload_thread(pcb);
 }
+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ *         2 when patched PLT trampoline was detected
+ *         3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+	int err;
+
+	do { /* PaX: patched PLT emulation #1 */
+		unsigned int ldah, ldq, jmp;
+
+		err = get_user(ldah, (unsigned int *)regs->pc);
+		err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+		err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+		if (err)
+			break;
+
+		if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+		    (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+		    jmp == 0x6BFB0000U)
+		{
+			unsigned long r27, addr;
+			unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+			unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+			addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+			err = get_user(r27, (unsigned long *)addr);
+			if (err)
+				break;
+
+			regs->r27 = r27;
+			regs->pc = r27;
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: patched PLT emulation #2 */
+		unsigned int ldah, lda, br;
+
+		err = get_user(ldah, (unsigned int *)regs->pc);
+		err |= get_user(lda, (unsigned int *)(regs->pc+4));
+		err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+		if (err)
+			break;
+
+		if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+		    (lda & 0xFFFF0000U) == 0xA77B0000U &&
+		    (br & 0xFFE00000U) == 0xC3E00000U)
+		{
+			unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+			unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+			unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+			regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+			regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+			return 2;
+		}
+	} while (0);
+
+	do { /* PaX: unpatched PLT emulation */
+		unsigned int br;
+
+		err = get_user(br, (unsigned int *)regs->pc);
+
+		if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+			unsigned int br2, ldq, nop, jmp;
+			unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+			addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+			err = get_user(br2, (unsigned int *)addr);
+			err |= get_user(ldq, (unsigned int *)(addr+4));
+			err |= get_user(nop, (unsigned int *)(addr+8));
+			err |= get_user(jmp, (unsigned int *)(addr+12));
+			err |= get_user(resolver, (unsigned long *)(addr+16));
+
+			if (err)
+				break;
+
+			if (br2 == 0xC3600000U &&
+			    ldq == 0xA77B000CU &&
+			    nop == 0x47FF041FU &&
+			    jmp == 0x6B7B0000U)
+			{
+				regs->r28 = regs->pc+4;
+				regs->r27 = addr+16;
+				regs->pc = resolver;
+				return 3;
+			}
+		}
+	} while (0);
+#endif
+
+	return 1;
+}
+
+void pax_report_insns(void *pc, void *sp)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 5; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int *)pc+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%08x ", c);
+	}
+	printk("\n");
+}
+#endif
 /*
  * This routine handles page faults.
It determines the address, @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns good_area: si_code = SEGV_ACCERR; if (cause < 0) { - if (!(vma->vm_flags & VM_EXEC)) + if (!(vma->vm_flags & VM_EXEC)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc) + goto bad_area; + + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + return; +#endif + + } + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp()); + do_group_exit(SIGKILL); +#else goto bad_area; +#endif + + } } else if (!cause) { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) diff -urNp linux-2.6.39.1/arch/arm/common/it8152.c linux-2.6.39.1/arch/arm/common/it8152.c --- linux-2.6.39.1/arch/arm/common/it8152.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/common/it8152.c 2011-05-22 19:36:30.000000000 -0400 @@ -221,7 +221,7 @@ static int it8152_pci_write_config(struc return PCIBIOS_SUCCESSFUL; } -static struct pci_ops it8152_ops = { +static const struct pci_ops it8152_ops = { .read = it8152_pci_read_config, .write = it8152_pci_write_config, }; diff -urNp linux-2.6.39.1/arch/arm/common/via82c505.c linux-2.6.39.1/arch/arm/common/via82c505.c --- linux-2.6.39.1/arch/arm/common/via82c505.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/common/via82c505.c 2011-05-22 19:36:30.000000000 -0400 @@ -52,7 +52,7 @@ via82c505_write_config(struct pci_bus *b return PCIBIOS_SUCCESSFUL; } -static struct pci_ops via82c505_ops = { +static const struct pci_ops via82c505_ops = { .read = via82c505_read_config, .write = via82c505_write_config, }; diff -urNp linux-2.6.39.1/arch/arm/include/asm/cacheflush.h linux-2.6.39.1/arch/arm/include/asm/cacheflush.h --- linux-2.6.39.1/arch/arm/include/asm/cacheflush.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/include/asm/cacheflush.h 2011-05-22 19:36:30.000000000 -0400 @@ -115,7 +115,7 @@ struct cpu_cache_fns { */ #ifdef MULTI_CACHE -extern struct cpu_cache_fns cpu_cache; +extern const struct cpu_cache_fns cpu_cache; #define __cpuc_flush_icache_all cpu_cache.flush_icache_all #define __cpuc_flush_kern_all cpu_cache.flush_kern_all diff -urNp linux-2.6.39.1/arch/arm/include/asm/elf.h linux-2.6.39.1/arch/arm/include/asm/elf.h --- linux-2.6.39.1/arch/arm/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400 @@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x00008000UL + +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) +#endif /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. 
A value of 0 means we @@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t extern void elf_set_personality(const struct elf32_hdr *); #define SET_PERSONALITY(ex) elf_set_personality(&(ex)) -struct mm_struct; -extern unsigned long arch_randomize_brk(struct mm_struct *mm); -#define arch_randomize_brk arch_randomize_brk - extern int vectors_user_mapping(void); #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping() #define ARCH_HAS_SETUP_ADDITIONAL_PAGES diff -urNp linux-2.6.39.1/arch/arm/include/asm/kmap_types.h linux-2.6.39.1/arch/arm/include/asm/kmap_types.h --- linux-2.6.39.1/arch/arm/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/include/asm/kmap_types.h 2011-05-22 19:36:30.000000000 -0400 @@ -21,6 +21,7 @@ enum km_type { KM_L1_CACHE, KM_L2_CACHE, KM_KDB, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.39.1/arch/arm/include/asm/outercache.h linux-2.6.39.1/arch/arm/include/asm/outercache.h --- linux-2.6.39.1/arch/arm/include/asm/outercache.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/include/asm/outercache.h 2011-05-22 19:36:30.000000000 -0400 @@ -38,7 +38,7 @@ struct outer_cache_fns { #ifdef CONFIG_OUTER_CACHE -extern struct outer_cache_fns outer_cache; +extern const struct outer_cache_fns outer_cache; static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) { diff -urNp linux-2.6.39.1/arch/arm/include/asm/page.h linux-2.6.39.1/arch/arm/include/asm/page.h --- linux-2.6.39.1/arch/arm/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/include/asm/page.h 2011-05-22 19:36:30.000000000 -0400 @@ -126,7 +126,7 @@ struct cpu_user_fns { }; #ifdef MULTI_USER -extern struct cpu_user_fns cpu_user; +extern const struct cpu_user_fns cpu_user; #define __cpu_clear_user_highpage cpu_user.cpu_clear_user_highpage #define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage diff -urNp linux-2.6.39.1/arch/arm/include/asm/uaccess.h linux-2.6.39.1/arch/arm/include/asm/uaccess.h --- linux-2.6.39.1/arch/arm/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/include/asm/uaccess.h 2011-05-22 19:36:30.000000000 -0400 @@ -403,6 +403,9 @@ extern unsigned long __must_check __strn static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + if (access_ok(VERIFY_READ, from, n)) n = __copy_from_user(to, from, n); else /* security hole - plug it */ @@ -412,6 +415,9 @@ static inline unsigned long __must_check static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + if (access_ok(VERIFY_WRITE, to, n)) n = __copy_to_user(to, from, n); return n; diff -urNp linux-2.6.39.1/arch/arm/kernel/kgdb.c linux-2.6.39.1/arch/arm/kernel/kgdb.c --- linux-2.6.39.1/arch/arm/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400 @@ -246,7 +246,7 @@ void kgdb_arch_exit(void) * and we handle the normal undef case within the do_undefinstr * handler. */ -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { #ifndef __ARMEB__ .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7} #else /* ! 
__ARMEB__ */ diff -urNp linux-2.6.39.1/arch/arm/kernel/process.c linux-2.6.39.1/arch/arm/kernel/process.c --- linux-2.6.39.1/arch/arm/kernel/process.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/kernel/process.c 2011-05-22 19:36:30.000000000 -0400 @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru return 0; } -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long range_end = mm->brk + 0x02000000; - return randomize_range(mm->brk, range_end, 0) ? : mm->brk; -} - #ifdef CONFIG_MMU /* * The vectors page is always readable from user space for the diff -urNp linux-2.6.39.1/arch/arm/mach-cns3xxx/pcie.c linux-2.6.39.1/arch/arm/mach-cns3xxx/pcie.c --- linux-2.6.39.1/arch/arm/mach-cns3xxx/pcie.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-cns3xxx/pcie.c 2011-05-22 19:36:30.000000000 -0400 @@ -162,7 +162,7 @@ static int cns3xxx_pci_setup(int nr, str return 1; } -static struct pci_ops cns3xxx_pcie_ops = { +static const struct pci_ops cns3xxx_pcie_ops = { .read = cns3xxx_pci_read_config, .write = cns3xxx_pci_write_config, }; diff -urNp linux-2.6.39.1/arch/arm/mach-dove/pcie.c linux-2.6.39.1/arch/arm/mach-dove/pcie.c --- linux-2.6.39.1/arch/arm/mach-dove/pcie.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-dove/pcie.c 2011-05-22 19:36:30.000000000 -0400 @@ -155,7 +155,7 @@ static int pcie_wr_conf(struct pci_bus * return ret; } -static struct pci_ops pcie_ops = { +static const struct pci_ops pcie_ops = { .read = pcie_rd_conf, .write = pcie_wr_conf, }; diff -urNp linux-2.6.39.1/arch/arm/mach-footbridge/dc21285.c linux-2.6.39.1/arch/arm/mach-footbridge/dc21285.c --- linux-2.6.39.1/arch/arm/mach-footbridge/dc21285.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-footbridge/dc21285.c 2011-05-22 19:36:30.000000000 -0400 @@ -129,7 +129,7 @@ dc21285_write_config(struct pci_bus *bus return PCIBIOS_SUCCESSFUL; } -static struct pci_ops dc21285_ops = { +static const struct pci_ops dc21285_ops = { .read = dc21285_read_config, .write = dc21285_write_config, }; diff -urNp linux-2.6.39.1/arch/arm/mach-integrator/pci_v3.c linux-2.6.39.1/arch/arm/mach-integrator/pci_v3.c --- linux-2.6.39.1/arch/arm/mach-integrator/pci_v3.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-integrator/pci_v3.c 2011-05-22 19:36:30.000000000 -0400 @@ -340,7 +340,7 @@ static int v3_write_config(struct pci_bu return PCIBIOS_SUCCESSFUL; } -static struct pci_ops pci_v3_ops = { +static const struct pci_ops pci_v3_ops = { .read = v3_read_config, .write = v3_write_config, }; diff -urNp linux-2.6.39.1/arch/arm/mach-iop13xx/pci.c linux-2.6.39.1/arch/arm/mach-iop13xx/pci.c --- linux-2.6.39.1/arch/arm/mach-iop13xx/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-iop13xx/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -324,7 +324,7 @@ iop13xx_atux_write_config(struct pci_bus return PCIBIOS_SUCCESSFUL; } -static struct pci_ops iop13xx_atux_ops = { +static const struct pci_ops iop13xx_atux_ops = { .read = iop13xx_atux_read_config, .write = iop13xx_atux_write_config, }; @@ -471,7 +471,7 @@ iop13xx_atue_write_config(struct pci_bus return PCIBIOS_SUCCESSFUL; } -static struct pci_ops iop13xx_atue_ops = { +static const struct pci_ops iop13xx_atue_ops = { .read = iop13xx_atue_read_config, .write = iop13xx_atue_write_config, }; diff -urNp linux-2.6.39.1/arch/arm/mach-ixp2000/enp2611.c 
linux-2.6.39.1/arch/arm/mach-ixp2000/enp2611.c --- linux-2.6.39.1/arch/arm/mach-ixp2000/enp2611.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-ixp2000/enp2611.c 2011-05-22 19:36:30.000000000 -0400 @@ -137,7 +137,7 @@ static int enp2611_pci_write_config(stru return PCIBIOS_DEVICE_NOT_FOUND; } -static struct pci_ops enp2611_pci_ops = { +static const struct pci_ops enp2611_pci_ops = { .read = enp2611_pci_read_config, .write = enp2611_pci_write_config }; diff -urNp linux-2.6.39.1/arch/arm/mach-ixp2000/pci.c linux-2.6.39.1/arch/arm/mach-ixp2000/pci.c --- linux-2.6.39.1/arch/arm/mach-ixp2000/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-ixp2000/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -125,7 +125,7 @@ int ixp2000_pci_write_config(struct pci_ } -static struct pci_ops ixp2000_pci_ops = { +static const struct pci_ops ixp2000_pci_ops = { .read = ixp2000_pci_read_config, .write = ixp2000_pci_write_config }; diff -urNp linux-2.6.39.1/arch/arm/mach-ixp23xx/pci.c linux-2.6.39.1/arch/arm/mach-ixp23xx/pci.c --- linux-2.6.39.1/arch/arm/mach-ixp23xx/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-ixp23xx/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -136,7 +136,7 @@ static int ixp23xx_pci_write_config(stru return PCIBIOS_SUCCESSFUL; } -struct pci_ops ixp23xx_pci_ops = { +const struct pci_ops ixp23xx_pci_ops = { .read = ixp23xx_pci_read_config, .write = ixp23xx_pci_write_config, }; diff -urNp linux-2.6.39.1/arch/arm/mach-ixp4xx/common-pci.c linux-2.6.39.1/arch/arm/mach-ixp4xx/common-pci.c --- linux-2.6.39.1/arch/arm/mach-ixp4xx/common-pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-ixp4xx/common-pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -283,7 +283,7 @@ static int ixp4xx_pci_write_config(struc return PCIBIOS_SUCCESSFUL; } -struct pci_ops ixp4xx_ops = { +const struct pci_ops ixp4xx_ops = { .read = ixp4xx_pci_read_config, .write = ixp4xx_pci_write_config, }; diff -urNp linux-2.6.39.1/arch/arm/mach-kirkwood/pcie.c linux-2.6.39.1/arch/arm/mach-kirkwood/pcie.c --- linux-2.6.39.1/arch/arm/mach-kirkwood/pcie.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-kirkwood/pcie.c 2011-05-22 19:36:30.000000000 -0400 @@ -111,7 +111,7 @@ static int pcie_wr_conf(struct pci_bus * return ret; } -static struct pci_ops pcie_ops = { +static const struct pci_ops pcie_ops = { .read = pcie_rd_conf, .write = pcie_wr_conf, }; diff -urNp linux-2.6.39.1/arch/arm/mach-ks8695/pci.c linux-2.6.39.1/arch/arm/mach-ks8695/pci.c --- linux-2.6.39.1/arch/arm/mach-ks8695/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-ks8695/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -136,7 +136,7 @@ static void ks8695_local_writeconfig(int __raw_writel(value, KS8695_PCI_VA + KS8695_PBCD); } -static struct pci_ops ks8695_pci_ops = { +static const struct pci_ops ks8695_pci_ops = { .read = ks8695_pci_readconfig, .write = ks8695_pci_writeconfig, }; diff -urNp linux-2.6.39.1/arch/arm/mach-mmp/clock.c linux-2.6.39.1/arch/arm/mach-mmp/clock.c --- linux-2.6.39.1/arch/arm/mach-mmp/clock.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-mmp/clock.c 2011-05-22 19:36:30.000000000 -0400 @@ -29,7 +29,7 @@ static void apbc_clk_disable(struct clk __raw_writel(0, clk->clk_rst); } -struct clkops apbc_clk_ops = { +const struct clkops apbc_clk_ops = { .enable = apbc_clk_enable, .disable = apbc_clk_disable, }; @@ -44,7 +44,7 @@ static void apmu_clk_disable(struct clk __raw_writel(0, clk->clk_rst); } 
-struct clkops apmu_clk_ops = { +const struct clkops apmu_clk_ops = { .enable = apmu_clk_enable, .disable = apmu_clk_disable, }; diff -urNp linux-2.6.39.1/arch/arm/mach-msm/iommu.c linux-2.6.39.1/arch/arm/mach-msm/iommu.c --- linux-2.6.39.1/arch/arm/mach-msm/iommu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-msm/iommu.c 2011-05-22 19:36:30.000000000 -0400 @@ -669,7 +669,7 @@ fail: return 0; } -static struct iommu_ops msm_iommu_ops = { +static const struct iommu_ops msm_iommu_ops = { .domain_init = msm_iommu_domain_init, .domain_destroy = msm_iommu_domain_destroy, .attach_dev = msm_iommu_attach_dev, diff -urNp linux-2.6.39.1/arch/arm/mach-msm/last_radio_log.c linux-2.6.39.1/arch/arm/mach-msm/last_radio_log.c --- linux-2.6.39.1/arch/arm/mach-msm/last_radio_log.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-msm/last_radio_log.c 2011-05-22 19:36:30.000000000 -0400 @@ -48,6 +48,7 @@ static ssize_t last_radio_log_read(struc } static struct file_operations last_radio_log_fops = { + /* cannot be const, see msm_init_last_radio_log */ .read = last_radio_log_read, .llseek = default_llseek, }; diff -urNp linux-2.6.39.1/arch/arm/mach-mv78xx0/pcie.c linux-2.6.39.1/arch/arm/mach-mv78xx0/pcie.c --- linux-2.6.39.1/arch/arm/mach-mv78xx0/pcie.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-mv78xx0/pcie.c 2011-05-22 19:36:30.000000000 -0400 @@ -222,7 +222,7 @@ static int pcie_wr_conf(struct pci_bus * return ret; } -static struct pci_ops pcie_ops = { +static const struct pci_ops pcie_ops = { .read = pcie_rd_conf, .write = pcie_wr_conf, }; diff -urNp linux-2.6.39.1/arch/arm/mach-orion5x/pci.c linux-2.6.39.1/arch/arm/mach-orion5x/pci.c --- linux-2.6.39.1/arch/arm/mach-orion5x/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-orion5x/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -130,7 +130,7 @@ static int pcie_wr_conf(struct pci_bus * return ret; } -static struct pci_ops pcie_ops = { +static const struct pci_ops pcie_ops = { .read = pcie_rd_conf, .write = pcie_wr_conf, }; @@ -368,7 +368,7 @@ static int orion5x_pci_wr_conf(struct pc PCI_FUNC(devfn), where, size, val); } -static struct pci_ops pci_ops = { +static const struct pci_ops pci_ops = { .read = orion5x_pci_rd_conf, .write = orion5x_pci_wr_conf, }; diff -urNp linux-2.6.39.1/arch/arm/mach-sa1100/pci-nanoengine.c linux-2.6.39.1/arch/arm/mach-sa1100/pci-nanoengine.c --- linux-2.6.39.1/arch/arm/mach-sa1100/pci-nanoengine.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-sa1100/pci-nanoengine.c 2011-05-22 19:36:30.000000000 -0400 @@ -117,7 +117,7 @@ static int nanoengine_write_config(struc return PCIBIOS_SUCCESSFUL; } -static struct pci_ops pci_nano_ops = { +static const struct pci_ops pci_nano_ops = { .read = nanoengine_read_config, .write = nanoengine_write_config, }; diff -urNp linux-2.6.39.1/arch/arm/mach-tegra/pcie.c linux-2.6.39.1/arch/arm/mach-tegra/pcie.c --- linux-2.6.39.1/arch/arm/mach-tegra/pcie.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/mach-tegra/pcie.c 2011-05-22 19:36:30.000000000 -0400 @@ -336,7 +336,7 @@ static int tegra_pcie_write_conf(struct return PCIBIOS_SUCCESSFUL; } -static struct pci_ops tegra_pcie_ops = { +static const struct pci_ops tegra_pcie_ops = { .read = tegra_pcie_read_conf, .write = tegra_pcie_write_conf, }; diff -urNp linux-2.6.39.1/arch/arm/mach-ux500/mbox-db5500.c linux-2.6.39.1/arch/arm/mach-ux500/mbox-db5500.c --- linux-2.6.39.1/arch/arm/mach-ux500/mbox-db5500.c 2011-05-19 
00:06:34.000000000 -0400
+++ linux-2.6.39.1/arch/arm/mach-ux500/mbox-db5500.c 2011-05-22 19:41:32.000000000 -0400
@@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
 	return sprintf(buf, "0x%X\n", mbox_value);
 }
-static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
+static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
 static int mbox_show(struct seq_file *s, void *data)
 {
diff -urNp linux-2.6.39.1/arch/arm/mach-versatile/pci.c linux-2.6.39.1/arch/arm/mach-versatile/pci.c
--- linux-2.6.39.1/arch/arm/mach-versatile/pci.c 2011-05-19 00:06:34.000000000 -0400
+++ linux-2.6.39.1/arch/arm/mach-versatile/pci.c 2011-05-22 19:36:30.000000000 -0400
@@ -165,7 +165,7 @@ static int versatile_write_config(struct
 	return PCIBIOS_SUCCESSFUL;
 }
-static struct pci_ops pci_versatile_ops = {
+static const struct pci_ops pci_versatile_ops = {
 	.read = versatile_read_config,
 	.write = versatile_write_config,
 };
diff -urNp linux-2.6.39.1/arch/arm/mm/fault.c linux-2.6.39.1/arch/arm/mm/fault.c
--- linux-2.6.39.1/arch/arm/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
+++ linux-2.6.39.1/arch/arm/mm/fault.c 2011-05-22 19:36:30.000000000 -0400
@@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
 }
 #endif
+#ifdef CONFIG_PAX_PAGEEXEC
+	if (fsr & FSR_LNX_PF) {
+		pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
+		do_group_exit(SIGKILL);
+	}
+#endif
+
 	tsk->thread.address = addr;
 	tsk->thread.error_code = fsr;
 	tsk->thread.trap_no = 14;
@@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
 }
 #endif /* CONFIG_MMU */
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(void *pc, void *sp)
+{
+	long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 20; i++) {
+		unsigned char c;
+		if (get_user(c, (__force unsigned char __user *)pc+i))
+			printk(KERN_CONT "?? ");
+		else
+			printk(KERN_CONT "%02x ", c);
+	}
+	printk("\n");
+
+	printk(KERN_ERR "PAX: bytes at SP-4: ");
+	for (i = -1; i < 20; i++) {
+		unsigned long c;
+		if (get_user(c, (__force unsigned long __user *)sp+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%08lx ", c);
+	}
+	printk("\n");
+}
+#endif
+
 /*
  * First Level Translation Fault Handler
  *
diff -urNp linux-2.6.39.1/arch/arm/mm/mmap.c linux-2.6.39.1/arch/arm/mm/mmap.c
--- linux-2.6.39.1/arch/arm/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
+++ linux-2.6.39.1/arch/arm/mm/mmap.c 2011-05-22 19:36:30.000000000 -0400
@@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
 	if (len > TASK_SIZE)
 		return -ENOMEM;
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
 		if (do_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
@@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
 			return addr;
 	}
 	if (len > mm->cached_hole_size) {
-	        start_addr = addr = mm->free_area_cache;
+		start_addr = addr = mm->free_area_cache;
 	} else {
-	        start_addr = addr = TASK_UNMAPPED_BASE;
-	        mm->cached_hole_size = 0;
+		start_addr = addr = mm->mmap_base;
+		mm->cached_hole_size = 0;
 	}
 	/* 8 bits of randomness in 20 address space bits */
 	if ((current->flags & PF_RANDOMIZE) &&
@@ -100,14 +103,14 @@ full_search:
 		 * Start a new search - just in case we missed
 		 * some holes.
*/ - if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = addr = TASK_UNMAPPED_BASE; + if (start_addr != mm->mmap_base) { + start_addr = addr = mm->mmap_base; mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr, len)) { /* * Remember the place where we stopped the search: */ diff -urNp linux-2.6.39.1/arch/arm/plat-iop/pci.c linux-2.6.39.1/arch/arm/plat-iop/pci.c --- linux-2.6.39.1/arch/arm/plat-iop/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/arm/plat-iop/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -161,7 +161,7 @@ iop3xx_write_config(struct pci_bus *bus, return PCIBIOS_SUCCESSFUL; } -static struct pci_ops iop3xx_ops = { +static const struct pci_ops iop3xx_ops = { .read = iop3xx_read_config, .write = iop3xx_write_config, }; diff -urNp linux-2.6.39.1/arch/avr32/include/asm/elf.h linux-2.6.39.1/arch/avr32/include/asm/elf.h --- linux-2.6.39.1/arch/avr32/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/avr32/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x00001000UL + +#define PAX_DELTA_MMAP_LEN 15 +#define PAX_DELTA_STACK_LEN 15 +#endif /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, diff -urNp linux-2.6.39.1/arch/avr32/include/asm/kmap_types.h linux-2.6.39.1/arch/avr32/include/asm/kmap_types.h --- linux-2.6.39.1/arch/avr32/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/avr32/include/asm/kmap_types.h 2011-05-22 19:36:30.000000000 -0400 @@ -22,7 +22,8 @@ D(10) KM_IRQ0, D(11) KM_IRQ1, D(12) KM_SOFTIRQ0, D(13) KM_SOFTIRQ1, -D(14) KM_TYPE_NR +D(14) KM_CLEARPAGE, +D(15) KM_TYPE_NR }; #undef D diff -urNp linux-2.6.39.1/arch/avr32/mm/fault.c linux-2.6.39.1/arch/avr32/mm/fault.c --- linux-2.6.39.1/arch/avr32/mm/fault.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/avr32/mm/fault.c 2011-05-22 19:36:30.000000000 -0400 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru int exception_trace = 1; +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 20; i++) { + unsigned char c; + if (get_user(c, (unsigned char *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%02x ", c); + } + printk("\n"); +} +#endif + /* * This routine handles page faults. It determines the address and the * problem, and then passes it off to one of the appropriate routines. 
@@ -156,6 +173,16 @@ bad_area: up_read(&mm->mmap_sem); if (user_mode(regs)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (mm->pax_flags & MF_PAX_PAGEEXEC) { + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) { + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp); + do_group_exit(SIGKILL); + } + } +#endif + if (exception_trace && printk_ratelimit()) printk("%s%s[%d]: segfault at %08lx pc %08lx " "sp %08lx ecr %lu\n", diff -urNp linux-2.6.39.1/arch/blackfin/kernel/kgdb.c linux-2.6.39.1/arch/blackfin/kernel/kgdb.c --- linux-2.6.39.1/arch/blackfin/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/blackfin/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400 @@ -420,7 +420,7 @@ int kgdb_arch_handle_exception(int vecto return -1; /* this means that we do not want to exit from the handler */ } -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { .gdb_bpt_instr = {0xa1}, .flags = KGDB_HW_BREAKPOINT, .set_hw_breakpoint = bfin_set_hw_break, diff -urNp linux-2.6.39.1/arch/blackfin/mm/maccess.c linux-2.6.39.1/arch/blackfin/mm/maccess.c --- linux-2.6.39.1/arch/blackfin/mm/maccess.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/blackfin/mm/maccess.c 2011-05-22 19:36:30.000000000 -0400 @@ -16,7 +16,7 @@ static int validate_memory_access_addres return bfin_mem_access_type(addr, size); } -long probe_kernel_read(void *dst, void *src, size_t size) +long probe_kernel_read(void *dst, const void *src, size_t size) { unsigned long lsrc = (unsigned long)src; int mem_type; @@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void * return -EFAULT; } -long probe_kernel_write(void *dst, void *src, size_t size) +long probe_kernel_write(void *dst, const void *src, size_t size) { unsigned long ldst = (unsigned long)dst; int mem_type; diff -urNp linux-2.6.39.1/arch/frv/include/asm/kmap_types.h linux-2.6.39.1/arch/frv/include/asm/kmap_types.h --- linux-2.6.39.1/arch/frv/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/frv/include/asm/kmap_types.h 2011-05-22 19:36:30.000000000 -0400 @@ -23,6 +23,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.39.1/arch/frv/mb93090-mb00/pci-frv.h linux-2.6.39.1/arch/frv/mb93090-mb00/pci-frv.h --- linux-2.6.39.1/arch/frv/mb93090-mb00/pci-frv.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/frv/mb93090-mb00/pci-frv.h 2011-05-22 19:36:30.000000000 -0400 @@ -34,7 +34,7 @@ void pcibios_resource_survey(void); extern int __nongpreldata pcibios_last_bus; extern struct pci_bus *__nongpreldata pci_root_bus; -extern struct pci_ops *__nongpreldata pci_root_ops; +extern const struct pci_ops *__nongpreldata pci_root_ops; /* pci-irq.c */ extern unsigned int pcibios_irq_mask; diff -urNp linux-2.6.39.1/arch/frv/mb93090-mb00/pci-vdk.c linux-2.6.39.1/arch/frv/mb93090-mb00/pci-vdk.c --- linux-2.6.39.1/arch/frv/mb93090-mb00/pci-vdk.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/frv/mb93090-mb00/pci-vdk.c 2011-05-22 19:36:30.000000000 -0400 @@ -27,7 +27,7 @@ unsigned int __nongpreldata pci_probe = int __nongpreldata pcibios_last_bus = -1; struct pci_bus *__nongpreldata pci_root_bus; -struct pci_ops *__nongpreldata pci_root_ops; +const struct pci_ops *__nongpreldata pci_root_ops; /* * The accessible PCI window does not cover the entire CPU address space, but @@ -169,7 +169,7 @@ static int pci_frv_write_config(struct p return PCIBIOS_SUCCESSFUL; } -static struct pci_ops pci_direct_frv = { +static const struct pci_ops 
pci_direct_frv = { pci_frv_read_config, pci_frv_write_config, }; @@ -356,7 +356,7 @@ void __init pcibios_fixup_bus(struct pci int __init pcibios_init(void) { - struct pci_ops *dir = NULL; + const struct pci_ops *dir = NULL; if (!mb93090_mb00_detected) return -ENXIO; diff -urNp linux-2.6.39.1/arch/frv/mm/elf-fdpic.c linux-2.6.39.1/arch/frv/mm/elf-fdpic.c --- linux-2.6.39.1/arch/frv/mm/elf-fdpic.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/frv/mm/elf-fdpic.c 2011-05-22 19:36:30.000000000 -0400 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(current->mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) goto success; } @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str for (; vma; vma = vma->vm_next) { if (addr > limit) break; - if (addr + len <= vma->vm_start) + if (check_heap_stack_gap(vma, addr, len)) goto success; addr = vma->vm_end; } @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str for (; vma; vma = vma->vm_next) { if (addr > limit) break; - if (addr + len <= vma->vm_start) + if (check_heap_stack_gap(vma, addr, len)) goto success; addr = vma->vm_end; } diff -urNp linux-2.6.39.1/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.39.1/arch/ia64/hp/common/hwsw_iommu.c --- linux-2.6.39.1/arch/ia64/hp/common/hwsw_iommu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/hp/common/hwsw_iommu.c 2011-05-22 19:36:30.000000000 -0400 @@ -17,7 +17,7 @@ #include #include -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; /* swiotlb declarations & definitions: */ extern int swiotlb_late_init_with_default_size (size_t size); @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev !sba_dma_ops.dma_supported(dev, *dev->dma_mask); } -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) { if (use_swiotlb(dev)) return &swiotlb_dma_ops; diff -urNp linux-2.6.39.1/arch/ia64/hp/common/sba_iommu.c linux-2.6.39.1/arch/ia64/hp/common/sba_iommu.c --- linux-2.6.39.1/arch/ia64/hp/common/sba_iommu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/hp/common/sba_iommu.c 2011-05-22 19:36:30.000000000 -0400 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d }, }; -extern struct dma_map_ops swiotlb_dma_ops; +extern const struct dma_map_ops swiotlb_dma_ops; static int __init sba_init(void) @@ -2211,7 +2211,7 @@ sba_page_override(char *str) __setup("sbapagesize=",sba_page_override); -struct dma_map_ops sba_dma_ops = { +const struct dma_map_ops sba_dma_ops = { .alloc_coherent = sba_alloc_coherent, .free_coherent = sba_free_coherent, .map_page = sba_map_page, diff -urNp linux-2.6.39.1/arch/ia64/include/asm/dma-mapping.h linux-2.6.39.1/arch/ia64/include/asm/dma-mapping.h --- linux-2.6.39.1/arch/ia64/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400 @@ -14,7 +14,7 @@ #define DMA_ERROR_CODE 0 -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; extern struct ia64_machine_vector ia64_mv; extern void set_iommu_machvec(void); @@ -26,7 +26,7 @@ extern void machvec_dma_sync_sg(struct d static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *daddr, gfp_t gfp) { - struct dma_map_ops *ops = 
platform_dma_get_ops(dev); + const struct dma_map_ops *ops = platform_dma_get_ops(dev); void *caddr; caddr = ops->alloc_coherent(dev, size, daddr, gfp); @@ -37,7 +37,7 @@ static inline void *dma_alloc_coherent(s static inline void dma_free_coherent(struct device *dev, size_t size, void *caddr, dma_addr_t daddr) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); + const struct dma_map_ops *ops = platform_dma_get_ops(dev); debug_dma_free_coherent(dev, size, caddr, daddr); ops->free_coherent(dev, size, caddr, daddr); } @@ -51,13 +51,13 @@ static inline void dma_free_coherent(str static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); + const struct dma_map_ops *ops = platform_dma_get_ops(dev); return ops->mapping_error(dev, daddr); } static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = platform_dma_get_ops(dev); + const struct dma_map_ops *ops = platform_dma_get_ops(dev); return ops->dma_supported(dev, mask); } diff -urNp linux-2.6.39.1/arch/ia64/include/asm/elf.h linux-2.6.39.1/arch/ia64/include/asm/elf.h --- linux-2.6.39.1/arch/ia64/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400 @@ -42,6 +42,13 @@ */ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) + +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) +#endif + #define PT_IA_64_UNWIND 0x70000001 /* IA-64 relocations: */ diff -urNp linux-2.6.39.1/arch/ia64/include/asm/machvec.h linux-2.6.39.1/arch/ia64/include/asm/machvec.h --- linux-2.6.39.1/arch/ia64/include/asm/machvec.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/include/asm/machvec.h 2011-05-22 19:36:30.000000000 -0400 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event /* DMA-mapping interface: */ typedef void ia64_mv_dma_init (void); typedef u64 ia64_mv_dma_get_required_mask (struct device *); -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); /* * WARNING: The legacy I/O space is _architected_. 
Platforms are @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co # endif /* CONFIG_IA64_GENERIC */ extern void swiotlb_dma_init(void); -extern struct dma_map_ops *dma_get_ops(struct device *); +extern const struct dma_map_ops *dma_get_ops(struct device *); /* * Define default versions so we can extend machvec for new platforms without having diff -urNp linux-2.6.39.1/arch/ia64/include/asm/pgtable.h linux-2.6.39.1/arch/ia64/include/asm/pgtable.h --- linux-2.6.39.1/arch/ia64/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/include/asm/pgtable.h 2011-05-22 19:36:30.000000000 -0400 @@ -12,7 +12,7 @@ * David Mosberger-Tang */ - +#include #include #include #include @@ -143,6 +143,17 @@ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) + +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_READONLY_NOEXEC PAGE_READONLY +# define PAGE_COPY_NOEXEC PAGE_COPY +#endif + #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) diff -urNp linux-2.6.39.1/arch/ia64/include/asm/spinlock.h linux-2.6.39.1/arch/ia64/include/asm/spinlock.h --- linux-2.6.39.1/arch/ia64/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/include/asm/spinlock.h 2011-05-22 19:36:30.000000000 -0400 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); - ACCESS_ONCE(*p) = (tmp + 2) & ~1; + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1; } static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) diff -urNp linux-2.6.39.1/arch/ia64/include/asm/uaccess.h linux-2.6.39.1/arch/ia64/include/asm/uaccess.h --- linux-2.6.39.1/arch/ia64/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/include/asm/uaccess.h 2011-05-22 19:36:30.000000000 -0400 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _ const void *__cu_from = (from); \ long __cu_len = (n); \ \ - if (__access_ok(__cu_to, __cu_len, get_fs())) \ + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ __cu_len; \ }) @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _ long __cu_len = (n); \ \ __chk_user_ptr(__cu_from); \ - if (__access_ok(__cu_from, __cu_len, get_fs())) \ + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ __cu_len; \ }) diff -urNp linux-2.6.39.1/arch/ia64/kernel/dma-mapping.c linux-2.6.39.1/arch/ia64/kernel/dma-mapping.c --- linux-2.6.39.1/arch/ia64/kernel/dma-mapping.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/kernel/dma-mapping.c 2011-05-22 19:36:30.000000000 -0400 @@ -3,7 +3,7 @@ /* Set this to 1 if there is a HW IOMMU in the system */ int iommu_detected __read_mostly; 
-struct dma_map_ops *dma_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) @@ -16,7 +16,7 @@ static int __init dma_init(void) } fs_initcall(dma_init); -struct dma_map_ops *dma_get_ops(struct device *dev) +const struct dma_map_ops *dma_get_ops(struct device *dev) { return dma_ops; } diff -urNp linux-2.6.39.1/arch/ia64/kernel/module.c linux-2.6.39.1/arch/ia64/kernel/module.c --- linux-2.6.39.1/arch/ia64/kernel/module.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/kernel/module.c 2011-05-22 19:36:30.000000000 -0400 @@ -315,8 +315,7 @@ module_alloc (unsigned long size) void module_free (struct module *mod, void *module_region) { - if (mod && mod->arch.init_unw_table && - module_region == mod->module_init) { + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) { unw_remove_unwind_table(mod->arch.init_unw_table); mod->arch.init_unw_table = NULL; } @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd } static inline int +in_init_rx (const struct module *mod, uint64_t addr) +{ + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx; +} + +static inline int +in_init_rw (const struct module *mod, uint64_t addr) +{ + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw; +} + +static inline int in_init (const struct module *mod, uint64_t addr) { - return addr - (uint64_t) mod->module_init < mod->init_size; + return in_init_rx(mod, addr) || in_init_rw(mod, addr); +} + +static inline int +in_core_rx (const struct module *mod, uint64_t addr) +{ + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx; +} + +static inline int +in_core_rw (const struct module *mod, uint64_t addr) +{ + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw; } static inline int in_core (const struct module *mod, uint64_t addr) { - return addr - (uint64_t) mod->module_core < mod->core_size; + return in_core_rx(mod, addr) || in_core_rw(mod, addr); } static inline int @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_ break; case RV_BDREL: - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); + if (in_init_rx(mod, val)) + val -= (uint64_t) mod->module_init_rx; + else if (in_init_rw(mod, val)) + val -= (uint64_t) mod->module_init_rw; + else if (in_core_rx(mod, val)) + val -= (uint64_t) mod->module_core_rx; + else if (in_core_rw(mod, val)) + val -= (uint64_t) mod->module_core_rw; break; case RV_LTV: @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, * addresses have been selected... */ uint64_t gp; - if (mod->core_size > MAX_LTOFF) + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF) /* * This takes advantage of fact that SHF_ARCH_SMALL gets allocated * at the end of the module. 
*/ - gp = mod->core_size - MAX_LTOFF / 2; + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2; else - gp = mod->core_size / 2; - gp = (uint64_t) mod->module_core + ((gp + 7) & -8); + gp = (mod->core_size_rx + mod->core_size_rw) / 2; + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8); mod->arch.gp = gp; DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); } diff -urNp linux-2.6.39.1/arch/ia64/kernel/pci-dma.c linux-2.6.39.1/arch/ia64/kernel/pci-dma.c --- linux-2.6.39.1/arch/ia64/kernel/pci-dma.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/kernel/pci-dma.c 2011-05-22 19:36:30.000000000 -0400 @@ -43,7 +43,7 @@ struct device fallback_dev = { .dma_mask = &fallback_dev.coherent_dma_mask, }; -extern struct dma_map_ops intel_dma_ops; +extern const struct dma_map_ops intel_dma_ops; static int __init pci_iommu_init(void) { diff -urNp linux-2.6.39.1/arch/ia64/kernel/pci-swiotlb.c linux-2.6.39.1/arch/ia64/kernel/pci-swiotlb.c --- linux-2.6.39.1/arch/ia64/kernel/pci-swiotlb.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/kernel/pci-swiotlb.c 2011-05-22 19:36:30.000000000 -0400 @@ -22,7 +22,7 @@ static void *ia64_swiotlb_alloc_coherent return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); } -struct dma_map_ops swiotlb_dma_ops = { +const struct dma_map_ops swiotlb_dma_ops = { .alloc_coherent = ia64_swiotlb_alloc_coherent, .free_coherent = swiotlb_free_coherent, .map_page = swiotlb_map_page, diff -urNp linux-2.6.39.1/arch/ia64/kernel/sys_ia64.c linux-2.6.39.1/arch/ia64/kernel/sys_ia64.c --- linux-2.6.39.1/arch/ia64/kernel/sys_ia64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/kernel/sys_ia64.c 2011-05-22 19:36:30.000000000 -0400 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil if (REGION_NUMBER(addr) == RGN_HPAGE) addr = 0; #endif + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + addr = mm->free_area_cache; + else +#endif + if (!addr) addr = mm->free_area_cache; @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { - if (start_addr != TASK_UNMAPPED_BASE) { + if (start_addr != mm->mmap_base) { /* Start a new search --- just in case we missed some holes. */ - addr = TASK_UNMAPPED_BASE; + addr = mm->mmap_base; goto full_search; } return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr, len)) { /* Remember the address where we stopped this search: */ mm->free_area_cache = addr + len; return addr; diff -urNp linux-2.6.39.1/arch/ia64/kernel/vmlinux.lds.S linux-2.6.39.1/arch/ia64/kernel/vmlinux.lds.S --- linux-2.6.39.1/arch/ia64/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/kernel/vmlinux.lds.S 2011-05-22 19:36:30.000000000 -0400 @@ -199,7 +199,7 @@ SECTIONS { /* Per-cpu data: */ . 
= ALIGN(PERCPU_PAGE_SIZE); PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu) - __phys_per_cpu_start = __per_cpu_load; + __phys_per_cpu_start = per_cpu_load; /* * ensure percpu data fits * into percpu page size diff -urNp linux-2.6.39.1/arch/ia64/mm/fault.c linux-2.6.39.1/arch/ia64/mm/fault.c --- linux-2.6.39.1/arch/ia64/mm/fault.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/mm/fault.c 2011-05-22 19:36:30.000000000 -0400 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned return pte_present(pte); } +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 8; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + void __kprobes ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) { @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT) | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)); - if ((vma->vm_flags & mask) != mask) + if ((vma->vm_flags & mask) != mask) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) { + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip) + goto bad_area; + + up_read(&mm->mmap_sem); + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12); + do_group_exit(SIGKILL); + } +#endif + goto bad_area; + } + /* * If for any reason at all we couldn't handle the fault, make * sure we exit gracefully rather than endlessly redo the diff -urNp linux-2.6.39.1/arch/ia64/mm/hugetlbpage.c linux-2.6.39.1/arch/ia64/mm/hugetlbpage.c --- linux-2.6.39.1/arch/ia64/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/mm/hugetlbpage.c 2011-05-22 19:36:30.000000000 -0400 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area( /* At this point: (!vmm || addr < vmm->vm_end). 
*/ if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) return -ENOMEM; - if (!vmm || (addr + len) <= vmm->vm_start) + if (check_heap_stack_gap(vmm, addr, len)) return addr; addr = ALIGN(vmm->vm_end, HPAGE_SIZE); } diff -urNp linux-2.6.39.1/arch/ia64/mm/init.c linux-2.6.39.1/arch/ia64/mm/init.c --- linux-2.6.39.1/arch/ia64/mm/init.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/mm/init.c 2011-05-22 19:36:30.000000000 -0400 @@ -122,6 +122,19 @@ ia64_init_addr_space (void) vma->vm_start = current->thread.rbs_bot & PAGE_MASK; vma->vm_end = vma->vm_start + PAGE_SIZE; vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; + +#ifdef CONFIG_PAX_PAGEEXEC + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) { + vma->vm_flags &= ~VM_EXEC; + +#ifdef CONFIG_PAX_MPROTECT + if (current->mm->pax_flags & MF_PAX_MPROTECT) + vma->vm_flags &= ~VM_MAYEXEC; +#endif + + } +#endif + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); down_write(&current->mm->mmap_sem); if (insert_vm_struct(current->mm, vma)) { diff -urNp linux-2.6.39.1/arch/ia64/pci/pci.c linux-2.6.39.1/arch/ia64/pci/pci.c --- linux-2.6.39.1/arch/ia64/pci/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/pci/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -109,7 +109,7 @@ static int pci_write(struct pci_bus *bus devfn, where, size, value); } -struct pci_ops pci_root_ops = { +const struct pci_ops pci_root_ops = { .read = pci_read, .write = pci_write, }; diff -urNp linux-2.6.39.1/arch/ia64/sn/pci/pci_dma.c linux-2.6.39.1/arch/ia64/sn/pci/pci_dma.c --- linux-2.6.39.1/arch/ia64/sn/pci/pci_dma.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/ia64/sn/pci/pci_dma.c 2011-05-22 19:36:30.000000000 -0400 @@ -465,7 +465,7 @@ int sn_pci_legacy_write(struct pci_bus * return ret; } -static struct dma_map_ops sn_dma_ops = { +static const struct dma_map_ops sn_dma_ops = { .alloc_coherent = sn_dma_alloc_coherent, .free_coherent = sn_dma_free_coherent, .map_page = sn_dma_map_page, diff -urNp linux-2.6.39.1/arch/m32r/lib/usercopy.c linux-2.6.39.1/arch/m32r/lib/usercopy.c --- linux-2.6.39.1/arch/m32r/lib/usercopy.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/m32r/lib/usercopy.c 2011-05-22 19:36:30.000000000 -0400 @@ -14,6 +14,9 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + prefetch(from); if (access_ok(VERIFY_WRITE, to, n)) __copy_user(to,from,n); @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + prefetchw(to); if (access_ok(VERIFY_READ, from, n)) __copy_user_zeroing(to,from,n); diff -urNp linux-2.6.39.1/arch/microblaze/include/asm/device.h linux-2.6.39.1/arch/microblaze/include/asm/device.h --- linux-2.6.39.1/arch/microblaze/include/asm/device.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/microblaze/include/asm/device.h 2011-05-22 19:36:30.000000000 -0400 @@ -13,7 +13,7 @@ struct device_node; struct dev_archdata { /* DMA operations on that device */ - struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; void *dma_data; }; diff -urNp linux-2.6.39.1/arch/microblaze/include/asm/dma-mapping.h linux-2.6.39.1/arch/microblaze/include/asm/dma-mapping.h --- linux-2.6.39.1/arch/microblaze/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/microblaze/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400 @@ -43,14 +43,14 @@ static inline
unsigned long device_to_ma return 0xfffffffful; } -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; /* * Available generic sets of operations */ -extern struct dma_map_ops dma_direct_ops; +extern const struct dma_map_ops dma_direct_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { /* We don't handle the NULL dev case for ISA for now. We could * do it via an out of line call but it is not needed for now. The @@ -63,14 +63,14 @@ static inline struct dma_map_ops *get_dm return dev->archdata.dma_ops; } -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops) { dev->archdata.dma_ops = ops; } static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (unlikely(!ops)) return 0; @@ -81,7 +81,7 @@ static inline int dma_supported(struct d static inline int dma_set_mask(struct device *dev, u64 dma_mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (unlikely(ops == NULL)) return -EIO; @@ -97,7 +97,7 @@ static inline int dma_set_mask(struct de static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->mapping_error) return ops->mapping_error(dev, dma_addr); @@ -110,7 +110,7 @@ static inline int dma_mapping_error(stru static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); void *memory; BUG_ON(!ops); @@ -124,7 +124,7 @@ static inline void *dma_alloc_coherent(s static inline void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); BUG_ON(!ops); debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); diff -urNp linux-2.6.39.1/arch/microblaze/include/asm/pci.h linux-2.6.39.1/arch/microblaze/include/asm/pci.h --- linux-2.6.39.1/arch/microblaze/include/asm/pci.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/microblaze/include/asm/pci.h 2011-05-22 19:36:30.000000000 -0400 @@ -54,8 +54,8 @@ static inline void pcibios_penalize_isa_ } #ifdef CONFIG_PCI -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); -extern struct dma_map_ops *get_pci_dma_ops(void); +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops); +extern const struct dma_map_ops *get_pci_dma_ops(void); #else /* CONFIG_PCI */ #define set_pci_dma_ops(d) #define get_pci_dma_ops() NULL diff -urNp linux-2.6.39.1/arch/microblaze/kernel/dma.c linux-2.6.39.1/arch/microblaze/kernel/dma.c --- linux-2.6.39.1/arch/microblaze/kernel/dma.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/microblaze/kernel/dma.c 2011-05-22 19:36:30.000000000 -0400 @@ -134,7 +134,7 @@ static inline void dma_direct_unmap_page __dma_sync_page(dma_address, 0 , size, direction); } -struct dma_map_ops dma_direct_ops = { +const struct dma_map_ops dma_direct_ops = { .alloc_coherent = dma_direct_alloc_coherent, .free_coherent = dma_direct_free_coherent, .map_sg = dma_direct_map_sg, diff -urNp linux-2.6.39.1/arch/microblaze/kernel/kgdb.c 
linux-2.6.39.1/arch/microblaze/kernel/kgdb.c --- linux-2.6.39.1/arch/microblaze/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/microblaze/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400 @@ -141,7 +141,7 @@ void kgdb_arch_exit(void) /* * Global data */ -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { #ifdef __MICROBLAZEEL__ .gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */ #else diff -urNp linux-2.6.39.1/arch/microblaze/pci/indirect_pci.c linux-2.6.39.1/arch/microblaze/pci/indirect_pci.c --- linux-2.6.39.1/arch/microblaze/pci/indirect_pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/microblaze/pci/indirect_pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -140,7 +140,7 @@ indirect_write_config(struct pci_bus *bu return PCIBIOS_SUCCESSFUL; } -static struct pci_ops indirect_pci_ops = { +static const struct pci_ops indirect_pci_ops = { .read = indirect_read_config, .write = indirect_write_config, }; diff -urNp linux-2.6.39.1/arch/microblaze/pci/pci-common.c linux-2.6.39.1/arch/microblaze/pci/pci-common.c --- linux-2.6.39.1/arch/microblaze/pci/pci-common.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/microblaze/pci/pci-common.c 2011-05-22 19:36:30.000000000 -0400 @@ -48,14 +48,14 @@ resource_size_t isa_mem_base; /* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */ unsigned int pci_flags; -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops; -void set_pci_dma_ops(struct dma_map_ops *dma_ops) +void set_pci_dma_ops(const struct dma_map_ops *dma_ops) { pci_dma_ops = dma_ops; } -struct dma_map_ops *get_pci_dma_ops(void) +const struct dma_map_ops *get_pci_dma_ops(void) { return pci_dma_ops; } @@ -1583,7 +1583,7 @@ null_write_config(struct pci_bus *bus, u return PCIBIOS_DEVICE_NOT_FOUND; } -static struct pci_ops null_pci_ops = { +static const struct pci_ops null_pci_ops = { .read = null_read_config, .write = null_write_config, }; diff -urNp linux-2.6.39.1/arch/mips/alchemy/common/pci.c linux-2.6.39.1/arch/mips/alchemy/common/pci.c --- linux-2.6.39.1/arch/mips/alchemy/common/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/alchemy/common/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -51,7 +51,7 @@ static struct resource pci_mem_resource .flags = IORESOURCE_MEM }; -extern struct pci_ops au1x_pci_ops; +extern const struct pci_ops au1x_pci_ops; static struct pci_controller au1x_controller = { .pci_ops = &au1x_pci_ops, diff -urNp linux-2.6.39.1/arch/mips/cavium-octeon/dma-octeon.c linux-2.6.39.1/arch/mips/cavium-octeon/dma-octeon.c --- linux-2.6.39.1/arch/mips/cavium-octeon/dma-octeon.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/cavium-octeon/dma-octeon.c 2011-05-22 19:36:30.000000000 -0400 @@ -202,7 +202,7 @@ static phys_addr_t octeon_unity_dma_to_p } struct octeon_dma_map_ops { - struct dma_map_ops dma_map_ops; + const struct dma_map_ops dma_map_ops; dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr); phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr); }; @@ -324,7 +324,7 @@ static struct octeon_dma_map_ops _octeon }, }; -struct dma_map_ops *octeon_pci_dma_map_ops; +const struct dma_map_ops *octeon_pci_dma_map_ops; void __init octeon_pci_dma_init(void) { diff -urNp linux-2.6.39.1/arch/mips/cobalt/pci.c linux-2.6.39.1/arch/mips/cobalt/pci.c --- linux-2.6.39.1/arch/mips/cobalt/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/cobalt/pci.c 
2011-05-22 19:36:30.000000000 -0400 @@ -14,7 +14,7 @@ #include -extern struct pci_ops gt64xxx_pci0_ops; +extern const struct pci_ops gt64xxx_pci0_ops; static struct resource cobalt_mem_resource = { .start = GT_DEF_PCI0_MEM0_BASE, diff -urNp linux-2.6.39.1/arch/mips/include/asm/device.h linux-2.6.39.1/arch/mips/include/asm/device.h --- linux-2.6.39.1/arch/mips/include/asm/device.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/include/asm/device.h 2011-05-22 19:36:30.000000000 -0400 @@ -10,7 +10,7 @@ struct dma_map_ops; struct dev_archdata { /* DMA operations on that device */ - struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; }; struct pdev_archdata { diff -urNp linux-2.6.39.1/arch/mips/include/asm/dma-mapping.h linux-2.6.39.1/arch/mips/include/asm/dma-mapping.h --- linux-2.6.39.1/arch/mips/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400 @@ -9,9 +9,9 @@ #include #endif -extern struct dma_map_ops *mips_dma_map_ops; +extern const struct dma_map_ops *mips_dma_map_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { if (dev && dev->archdata.dma_ops) return dev->archdata.dma_ops; @@ -33,13 +33,13 @@ static inline void dma_mark_clean(void * static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); return ops->dma_supported(dev, mask); } static inline int dma_mapping_error(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); return ops->mapping_error(dev, mask); } @@ -61,7 +61,7 @@ static inline void *dma_alloc_coherent(s dma_addr_t *dma_handle, gfp_t gfp) { void *ret; - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); ret = ops->alloc_coherent(dev, size, dma_handle, gfp); @@ -73,7 +73,7 @@ static inline void *dma_alloc_coherent(s static inline void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); ops->free_coherent(dev, size, vaddr, dma_handle); diff -urNp linux-2.6.39.1/arch/mips/include/asm/elf.h linux-2.6.39.1/arch/mips/include/asm/elf.h --- linux-2.6.39.1/arch/mips/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400 @@ -372,13 +372,16 @@ extern const char *__elf_platform; #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) #endif +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 
27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp); -struct mm_struct; -extern unsigned long arch_randomize_brk(struct mm_struct *mm); -#define arch_randomize_brk arch_randomize_brk - #endif /* _ASM_ELF_H */ diff -urNp linux-2.6.39.1/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h linux-2.6.39.1/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h --- linux-2.6.39.1/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h 2011-05-22 19:36:30.000000000 -0400 @@ -66,7 +66,7 @@ dma_addr_t phys_to_dma(struct device *de phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); struct dma_map_ops; -extern struct dma_map_ops *octeon_pci_dma_map_ops; +extern const struct dma_map_ops *octeon_pci_dma_map_ops; extern char *octeon_swiotlb; #endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */ diff -urNp linux-2.6.39.1/arch/mips/include/asm/page.h linux-2.6.39.1/arch/mips/include/asm/page.h --- linux-2.6.39.1/arch/mips/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/include/asm/page.h 2011-05-22 19:36:30.000000000 -0400 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa #ifdef CONFIG_CPU_MIPS32 typedef struct { unsigned long pte_low, pte_high; } pte_t; #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; }) #else typedef struct { unsigned long long pte; } pte_t; #define pte_val(x) ((x).pte) diff -urNp linux-2.6.39.1/arch/mips/include/asm/pci/bridge.h linux-2.6.39.1/arch/mips/include/asm/pci/bridge.h --- linux-2.6.39.1/arch/mips/include/asm/pci/bridge.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/include/asm/pci/bridge.h 2011-05-22 19:36:30.000000000 -0400 @@ -849,6 +849,6 @@ struct bridge_controller { extern void register_bridge_irq(unsigned int irq); extern int request_bridge_irq(struct bridge_controller *bc); -extern struct pci_ops bridge_pci_ops; +extern const struct pci_ops bridge_pci_ops; #endif /* _ASM_PCI_BRIDGE_H */ diff -urNp linux-2.6.39.1/arch/mips/include/asm/system.h linux-2.6.39.1/arch/mips/include/asm/system.h --- linux-2.6.39.1/arch/mips/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/include/asm/system.h 2011-05-22 19:36:30.000000000 -0400 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void); */ #define __ARCH_WANT_UNLOCKED_CTXSW -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) ((x) & ~0xfUL) #endif /* _ASM_SYSTEM_H */ diff -urNp linux-2.6.39.1/arch/mips/kernel/binfmt_elfn32.c linux-2.6.39.1/arch/mips/kernel/binfmt_elfn32.c --- linux-2.6.39.1/arch/mips/kernel/binfmt_elfn32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/kernel/binfmt_elfn32.c 2011-05-22 19:36:30.000000000 -0400 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N #undef ELF_ET_DYN_BASE #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 
27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #include #include #include diff -urNp linux-2.6.39.1/arch/mips/kernel/binfmt_elfo32.c linux-2.6.39.1/arch/mips/kernel/binfmt_elfo32.c --- linux-2.6.39.1/arch/mips/kernel/binfmt_elfo32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/kernel/binfmt_elfo32.c 2011-05-22 19:36:30.000000000 -0400 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N #undef ELF_ET_DYN_BASE #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #include /* diff -urNp linux-2.6.39.1/arch/mips/kernel/kgdb.c linux-2.6.39.1/arch/mips/kernel/kgdb.c --- linux-2.6.39.1/arch/mips/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400 @@ -351,7 +351,7 @@ int kgdb_arch_handle_exception(int vecto return -1; } -struct kgdb_arch arch_kgdb_ops; +struct kgdb_arch arch_kgdb_ops; /* cannot be const, see kgdb_arch_init */ /* * We use kgdb_early_setup so that functions we need to call now don't diff -urNp linux-2.6.39.1/arch/mips/kernel/process.c linux-2.6.39.1/arch/mips/kernel/process.c --- linux-2.6.39.1/arch/mips/kernel/process.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/kernel/process.c 2011-05-22 19:36:30.000000000 -0400 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru out: return pc; } - -/* - * Don't forget that the stack pointer must be aligned on a 8 bytes - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. - */ -unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() & ~PAGE_MASK; - - return sp & ALMASK; -} diff -urNp linux-2.6.39.1/arch/mips/kernel/syscall.c linux-2.6.39.1/arch/mips/kernel/syscall.c --- linux-2.6.39.1/arch/mips/kernel/syscall.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/kernel/syscall.c 2011-05-22 19:36:30.000000000 -0400 @@ -108,14 +108,18 @@ unsigned long arch_get_unmapped_area(str do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; + +#ifdef CONFIG_PAX_RANDMMAP + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); vmm = find_vma(current->mm, addr); - if (task_size - len >= addr && - (!vmm || addr + len <= vmm->vm_start)) + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len)) return addr; } addr = current->mm->mmap_base; @@ -128,7 +132,7 @@ unsigned long arch_get_unmapped_area(str /* At this point: (!vmm || addr < vmm->vm_end). 
*/ if (task_size - len < addr) return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) + if (check_heap_stack_gap(vmm, addr, len)) return addr; addr = vmm->vm_end; if (do_color_align) @@ -154,33 +158,6 @@ void arch_pick_mmap_layout(struct mm_str mm->unmap_area = arch_unmap_area; } -static inline unsigned long brk_rnd(void) -{ - unsigned long rnd = get_random_int(); - - rnd = rnd << PAGE_SHIFT; - /* 8MB for 32bit, 256MB for 64bit */ - if (TASK_IS_32BIT_ADDR) - rnd = rnd & 0x7ffffful; - else - rnd = rnd & 0xffffffful; - - return rnd; -} - -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long base = mm->brk; - unsigned long ret; - - ret = PAGE_ALIGN(base + brk_rnd()); - - if (ret < mm->brk) - return mm->brk; - - return ret; -} - SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, off_t, offset) diff -urNp linux-2.6.39.1/arch/mips/mm/dma-default.c linux-2.6.39.1/arch/mips/mm/dma-default.c --- linux-2.6.39.1/arch/mips/mm/dma-default.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/mm/dma-default.c 2011-05-22 19:36:30.000000000 -0400 @@ -300,7 +300,7 @@ void dma_cache_sync(struct device *dev, EXPORT_SYMBOL(dma_cache_sync); -static struct dma_map_ops mips_default_dma_map_ops = { +static const struct dma_map_ops mips_default_dma_map_ops = { .alloc_coherent = mips_dma_alloc_coherent, .free_coherent = mips_dma_free_coherent, .map_page = mips_dma_map_page, @@ -315,7 +315,7 @@ static struct dma_map_ops mips_default_d .dma_supported = mips_dma_supported }; -struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; +const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; EXPORT_SYMBOL(mips_dma_map_ops); #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) diff -urNp linux-2.6.39.1/arch/mips/mm/fault.c linux-2.6.39.1/arch/mips/mm/fault.c --- linux-2.6.39.1/arch/mips/mm/fault.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/mm/fault.c 2011-05-22 19:36:30.000000000 -0400 @@ -28,6 +28,23 @@ #include /* For VMALLOC_END */ #include +#ifdef CONFIG_PAX_PAGEEXEC +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + /* * This routine handles page faults. 
It determines the address, * and the problem, and then passes it off to one of the appropriate diff -urNp linux-2.6.39.1/arch/mips/mti-malta/malta-pci.c linux-2.6.39.1/arch/mips/mti-malta/malta-pci.c --- linux-2.6.39.1/arch/mips/mti-malta/malta-pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/mti-malta/malta-pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -64,9 +64,9 @@ static struct resource msc_io_resource = .flags = IORESOURCE_IO, }; -extern struct pci_ops bonito64_pci_ops; -extern struct pci_ops gt64xxx_pci0_ops; -extern struct pci_ops msc_pci_ops; +extern const struct pci_ops bonito64_pci_ops; +extern const struct pci_ops gt64xxx_pci0_ops; +extern const struct pci_ops msc_pci_ops; static struct pci_controller bonito64_controller = { .pci_ops = &bonito64_pci_ops, diff -urNp linux-2.6.39.1/arch/mips/nxp/pnx8550/common/pci.c linux-2.6.39.1/arch/mips/nxp/pnx8550/common/pci.c --- linux-2.6.39.1/arch/mips/nxp/pnx8550/common/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/nxp/pnx8550/common/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -40,7 +40,7 @@ static struct resource pci_mem_resource .flags = IORESOURCE_MEM }; -extern struct pci_ops pnx8550_pci_ops; +extern const struct pci_ops pnx8550_pci_ops; static struct pci_controller pnx8550_controller = { .pci_ops = &pnx8550_pci_ops, diff -urNp linux-2.6.39.1/arch/mips/pci/ops-au1000.c linux-2.6.39.1/arch/mips/pci/ops-au1000.c --- linux-2.6.39.1/arch/mips/pci/ops-au1000.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-au1000.c 2011-05-22 19:36:30.000000000 -0400 @@ -302,7 +302,7 @@ static int config_write(struct pci_bus * } } -struct pci_ops au1x_pci_ops = { +const struct pci_ops au1x_pci_ops = { config_read, config_write }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-bcm63xx.c linux-2.6.39.1/arch/mips/pci/ops-bcm63xx.c --- linux-2.6.39.1/arch/mips/pci/ops-bcm63xx.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-bcm63xx.c 2011-05-22 19:36:30.000000000 -0400 @@ -173,7 +173,7 @@ static int bcm63xx_pci_write(struct pci_ where, size, val); } -struct pci_ops bcm63xx_pci_ops = { +const struct pci_ops bcm63xx_pci_ops = { .read = bcm63xx_pci_read, .write = bcm63xx_pci_write }; @@ -402,7 +402,7 @@ static int bcm63xx_cb_write(struct pci_b return PCIBIOS_DEVICE_NOT_FOUND; } -struct pci_ops bcm63xx_cb_ops = { +const struct pci_ops bcm63xx_cb_ops = { .read = bcm63xx_cb_read, .write = bcm63xx_cb_write, }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-bonito64.c linux-2.6.39.1/arch/mips/pci/ops-bonito64.c --- linux-2.6.39.1/arch/mips/pci/ops-bonito64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-bonito64.c 2011-05-22 19:36:30.000000000 -0400 @@ -155,7 +155,7 @@ static int bonito64_pcibios_write(struct return PCIBIOS_SUCCESSFUL; } -struct pci_ops bonito64_pci_ops = { +const struct pci_ops bonito64_pci_ops = { .read = bonito64_pcibios_read, .write = bonito64_pcibios_write }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-bridge.c linux-2.6.39.1/arch/mips/pci/ops-bridge.c --- linux-2.6.39.1/arch/mips/pci/ops-bridge.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-bridge.c 2011-05-22 19:36:30.000000000 -0400 @@ -316,7 +316,7 @@ static int pci_write_config(struct pci_b return pci_conf0_write_config(bus, devfn, where, size, value); } -struct pci_ops bridge_pci_ops = { +const struct pci_ops bridge_pci_ops = { .read = pci_read_config, .write = pci_write_config, }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-emma2rh.c 
linux-2.6.39.1/arch/mips/pci/ops-emma2rh.c --- linux-2.6.39.1/arch/mips/pci/ops-emma2rh.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-emma2rh.c 2011-05-22 19:36:30.000000000 -0400 @@ -176,7 +176,7 @@ static int pci_config_write(struct pci_b return PCIBIOS_SUCCESSFUL; } -struct pci_ops emma2rh_pci_ops = { +const struct pci_ops emma2rh_pci_ops = { .read = pci_config_read, .write = pci_config_write, }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-gt64xxx_pci0.c linux-2.6.39.1/arch/mips/pci/ops-gt64xxx_pci0.c --- linux-2.6.39.1/arch/mips/pci/ops-gt64xxx_pci0.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-gt64xxx_pci0.c 2011-05-22 19:36:30.000000000 -0400 @@ -146,7 +146,7 @@ static int gt64xxx_pci0_pcibios_write(st return PCIBIOS_SUCCESSFUL; } -struct pci_ops gt64xxx_pci0_ops = { +const struct pci_ops gt64xxx_pci0_ops = { .read = gt64xxx_pci0_pcibios_read, .write = gt64xxx_pci0_pcibios_write }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-loongson2.c linux-2.6.39.1/arch/mips/pci/ops-loongson2.c --- linux-2.6.39.1/arch/mips/pci/ops-loongson2.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-loongson2.c 2011-05-22 19:36:30.000000000 -0400 @@ -174,7 +174,7 @@ static int loongson_pcibios_write(struct return PCIBIOS_SUCCESSFUL; } -struct pci_ops loongson_pci_ops = { +const struct pci_ops loongson_pci_ops = { .read = loongson_pcibios_read, .write = loongson_pcibios_write }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-mace.c linux-2.6.39.1/arch/mips/pci/ops-mace.c --- linux-2.6.39.1/arch/mips/pci/ops-mace.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-mace.c 2011-05-22 19:36:30.000000000 -0400 @@ -96,7 +96,7 @@ mace_pci_write_config(struct pci_bus *bu return PCIBIOS_SUCCESSFUL; } -struct pci_ops mace_pci_ops = { +const struct pci_ops mace_pci_ops = { .read = mace_pci_read_config, .write = mace_pci_write_config, }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-msc.c linux-2.6.39.1/arch/mips/pci/ops-msc.c --- linux-2.6.39.1/arch/mips/pci/ops-msc.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-msc.c 2011-05-22 19:36:30.000000000 -0400 @@ -142,7 +142,7 @@ static int msc_pcibios_write(struct pci_ return PCIBIOS_SUCCESSFUL; } -struct pci_ops msc_pci_ops = { +const struct pci_ops msc_pci_ops = { .read = msc_pcibios_read, .write = msc_pcibios_write }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-nile4.c linux-2.6.39.1/arch/mips/pci/ops-nile4.c --- linux-2.6.39.1/arch/mips/pci/ops-nile4.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-nile4.c 2011-05-22 19:36:30.000000000 -0400 @@ -141,7 +141,7 @@ static int nile4_pcibios_write(struct pc return PCIBIOS_SUCCESSFUL; } -struct pci_ops nile4_pci_ops = { +const struct pci_ops nile4_pci_ops = { .read = nile4_pcibios_read, .write = nile4_pcibios_write, }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-pmcmsp.c linux-2.6.39.1/arch/mips/pci/ops-pmcmsp.c --- linux-2.6.39.1/arch/mips/pci/ops-pmcmsp.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-pmcmsp.c 2011-05-22 19:36:30.000000000 -0400 @@ -904,7 +904,7 @@ msp_pcibios_write_config(struct pci_bus * write - function for Linux to generate PCI Configuration writes. 
* ****************************************************************************/ -struct pci_ops msp_pci_ops = { +const struct pci_ops msp_pci_ops = { .read = msp_pcibios_read_config, .write = msp_pcibios_write_config }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-pnx8550.c linux-2.6.39.1/arch/mips/pci/ops-pnx8550.c --- linux-2.6.39.1/arch/mips/pci/ops-pnx8550.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-pnx8550.c 2011-05-22 19:36:30.000000000 -0400 @@ -276,7 +276,7 @@ static int config_write(struct pci_bus * } } -struct pci_ops pnx8550_pci_ops = { +const struct pci_ops pnx8550_pci_ops = { config_read, config_write }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-rc32434.c linux-2.6.39.1/arch/mips/pci/ops-rc32434.c --- linux-2.6.39.1/arch/mips/pci/ops-rc32434.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-rc32434.c 2011-05-22 19:36:30.000000000 -0400 @@ -201,7 +201,7 @@ static int pci_config_write(struct pci_b } } -struct pci_ops rc32434_pci_ops = { +const struct pci_ops rc32434_pci_ops = { .read = pci_config_read, .write = pci_config_write, }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-sni.c linux-2.6.39.1/arch/mips/pci/ops-sni.c --- linux-2.6.39.1/arch/mips/pci/ops-sni.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-sni.c 2011-05-22 19:36:30.000000000 -0400 @@ -83,7 +83,7 @@ static int pcimt_write(struct pci_bus *b return 0; } -struct pci_ops sni_pcimt_ops = { +const struct pci_ops sni_pcimt_ops = { .read = pcimt_read, .write = pcimt_write, }; @@ -158,7 +158,7 @@ static int pcit_write(struct pci_bus *bu } -struct pci_ops sni_pcit_ops = { +const struct pci_ops sni_pcit_ops = { .read = pcit_read, .write = pcit_write, }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-titan.c linux-2.6.39.1/arch/mips/pci/ops-titan.c --- linux-2.6.39.1/arch/mips/pci/ops-titan.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-titan.c 2011-05-22 19:36:30.000000000 -0400 @@ -105,7 +105,7 @@ static int titan_write_config(struct pci /* * Titan PCI structure */ -struct pci_ops titan_pci_ops = { +const struct pci_ops titan_pci_ops = { titan_read_config, titan_write_config, }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-titan-ht.c linux-2.6.39.1/arch/mips/pci/ops-titan-ht.c --- linux-2.6.39.1/arch/mips/pci/ops-titan-ht.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-titan-ht.c 2011-05-22 19:36:30.000000000 -0400 @@ -118,7 +118,7 @@ static int titan_ht_config_write(struct return PCIBIOS_SUCCESSFUL; } -struct pci_ops titan_ht_pci_ops = { +const struct pci_ops titan_ht_pci_ops = { .read = titan_ht_config_read, .write = titan_ht_config_write, }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-tx3927.c linux-2.6.39.1/arch/mips/pci/ops-tx3927.c --- linux-2.6.39.1/arch/mips/pci/ops-tx3927.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-tx3927.c 2011-05-22 19:36:30.000000000 -0400 @@ -121,7 +121,7 @@ static int tx3927_pci_write_config(struc return check_abort(); } -static struct pci_ops tx3927_pci_ops = { +static const struct pci_ops tx3927_pci_ops = { .read = tx3927_pci_read_config, .write = tx3927_pci_write_config, }; diff -urNp linux-2.6.39.1/arch/mips/pci/ops-vr41xx.c linux-2.6.39.1/arch/mips/pci/ops-vr41xx.c --- linux-2.6.39.1/arch/mips/pci/ops-vr41xx.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/ops-vr41xx.c 2011-05-22 19:36:30.000000000 -0400 @@ -120,7 +120,7 @@ static int pci_config_write(struct pci_b return PCIBIOS_SUCCESSFUL; } -struct 
pci_ops vr41xx_pci_ops = { +const struct pci_ops vr41xx_pci_ops = { .read = pci_config_read, .write = pci_config_write, }; diff -urNp linux-2.6.39.1/arch/mips/pci/pci-bcm1480.c linux-2.6.39.1/arch/mips/pci/pci-bcm1480.c --- linux-2.6.39.1/arch/mips/pci/pci-bcm1480.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pci-bcm1480.c 2011-05-22 19:36:30.000000000 -0400 @@ -171,7 +171,7 @@ static int bcm1480_pcibios_write(struct return PCIBIOS_SUCCESSFUL; } -struct pci_ops bcm1480_pci_ops = { +const struct pci_ops bcm1480_pci_ops = { bcm1480_pcibios_read, bcm1480_pcibios_write, }; diff -urNp linux-2.6.39.1/arch/mips/pci/pci-bcm1480ht.c linux-2.6.39.1/arch/mips/pci/pci-bcm1480ht.c --- linux-2.6.39.1/arch/mips/pci/pci-bcm1480ht.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pci-bcm1480ht.c 2011-05-22 19:36:30.000000000 -0400 @@ -166,7 +166,7 @@ static int bcm1480ht_pcibios_get_busno(v return 0; } -struct pci_ops bcm1480ht_pci_ops = { +const struct pci_ops bcm1480ht_pci_ops = { .read = bcm1480ht_pcibios_read, .write = bcm1480ht_pcibios_write, }; diff -urNp linux-2.6.39.1/arch/mips/pci/pci-bcm63xx.h linux-2.6.39.1/arch/mips/pci/pci-bcm63xx.h --- linux-2.6.39.1/arch/mips/pci/pci-bcm63xx.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pci-bcm63xx.h 2011-05-22 19:36:30.000000000 -0400 @@ -16,8 +16,8 @@ /* * defined in ops-bcm63xx.c */ -extern struct pci_ops bcm63xx_pci_ops; -extern struct pci_ops bcm63xx_cb_ops; +extern const struct pci_ops bcm63xx_pci_ops; +extern const struct pci_ops bcm63xx_cb_ops; /* * defined in pci-bcm63xx.c diff -urNp linux-2.6.39.1/arch/mips/pci/pci-emma2rh.c linux-2.6.39.1/arch/mips/pci/pci-emma2rh.c --- linux-2.6.39.1/arch/mips/pci/pci-emma2rh.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pci-emma2rh.c 2011-05-22 19:36:30.000000000 -0400 @@ -43,7 +43,7 @@ static struct resource pci_mem_resource .flags = IORESOURCE_MEM, }; -extern struct pci_ops emma2rh_pci_ops; +extern const struct pci_ops emma2rh_pci_ops; static struct pci_controller emma2rh_pci_controller = { .pci_ops = &emma2rh_pci_ops, diff -urNp linux-2.6.39.1/arch/mips/pci/pcie-octeon.c linux-2.6.39.1/arch/mips/pci/pcie-octeon.c --- linux-2.6.39.1/arch/mips/pci/pcie-octeon.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pcie-octeon.c 2011-05-22 19:36:30.000000000 -0400 @@ -1237,7 +1237,7 @@ static int octeon_pcie1_write_config(str return octeon_pcie_write_config(1, bus, devfn, reg, size, val); } -static struct pci_ops octeon_pcie0_ops = { +static const struct pci_ops octeon_pcie0_ops = { octeon_pcie0_read_config, octeon_pcie0_write_config, }; @@ -1258,7 +1258,7 @@ static struct pci_controller octeon_pcie .io_resource = &octeon_pcie0_io_resource, }; -static struct pci_ops octeon_pcie1_ops = { +static const struct pci_ops octeon_pcie1_ops = { octeon_pcie1_read_config, octeon_pcie1_write_config, }; diff -urNp linux-2.6.39.1/arch/mips/pci/pci-ip27.c linux-2.6.39.1/arch/mips/pci/pci-ip27.c --- linux-2.6.39.1/arch/mips/pci/pci-ip27.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pci-ip27.c 2011-05-22 19:36:30.000000000 -0400 @@ -39,7 +39,7 @@ static struct bridge_controller bridges[ struct bridge_controller *irq_to_bridge[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS]; int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS]; -extern struct pci_ops bridge_pci_ops; +extern const struct pci_ops bridge_pci_ops; int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid) { diff -urNp 
linux-2.6.39.1/arch/mips/pci/pci-ip32.c linux-2.6.39.1/arch/mips/pci/pci-ip32.c --- linux-2.6.39.1/arch/mips/pci/pci-ip32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pci-ip32.c 2011-05-22 19:36:30.000000000 -0400 @@ -82,7 +82,7 @@ static irqreturn_t macepci_error(int irq } -extern struct pci_ops mace_pci_ops; +extern const struct pci_ops mace_pci_ops; #ifdef CONFIG_64BIT static struct resource mace_pci_mem_resource = { .name = "SGI O2 PCI MEM", diff -urNp linux-2.6.39.1/arch/mips/pci/pci-lasat.c linux-2.6.39.1/arch/mips/pci/pci-lasat.c --- linux-2.6.39.1/arch/mips/pci/pci-lasat.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pci-lasat.c 2011-05-22 19:36:30.000000000 -0400 @@ -14,8 +14,8 @@ #include -extern struct pci_ops nile4_pci_ops; -extern struct pci_ops gt64xxx_pci0_ops; +extern const struct pci_ops nile4_pci_ops; +extern const struct pci_ops gt64xxx_pci0_ops; static struct resource lasat_pci_mem_resource = { .name = "LASAT PCI MEM", .start = 0x18000000, diff -urNp linux-2.6.39.1/arch/mips/pci/pci-octeon.c linux-2.6.39.1/arch/mips/pci/pci-octeon.c --- linux-2.6.39.1/arch/mips/pci/pci-octeon.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pci-octeon.c 2011-05-22 19:36:30.000000000 -0400 @@ -334,7 +334,7 @@ static int octeon_write_config(struct pc } -static struct pci_ops octeon_pci_ops = { +static const struct pci_ops octeon_pci_ops = { octeon_read_config, octeon_write_config, }; diff -urNp linux-2.6.39.1/arch/mips/pci/pci-rc32434.c linux-2.6.39.1/arch/mips/pci/pci-rc32434.c --- linux-2.6.39.1/arch/mips/pci/pci-rc32434.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pci-rc32434.c 2011-05-22 19:36:30.000000000 -0400 @@ -75,7 +75,7 @@ static struct resource rc32434_res_pci_i .flags = IORESOURCE_IO, }; -extern struct pci_ops rc32434_pci_ops; +extern const struct pci_ops rc32434_pci_ops; #define PCI_MEM1_START PCI_ADDR_START #define PCI_MEM1_END (PCI_ADDR_START + CPUTOPCI_MEM_WIN - 1) diff -urNp linux-2.6.39.1/arch/mips/pci/pci-sb1250.c linux-2.6.39.1/arch/mips/pci/pci-sb1250.c --- linux-2.6.39.1/arch/mips/pci/pci-sb1250.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pci-sb1250.c 2011-05-22 19:36:30.000000000 -0400 @@ -181,7 +181,7 @@ static int sb1250_pcibios_write(struct p return PCIBIOS_SUCCESSFUL; } -struct pci_ops sb1250_pci_ops = { +const struct pci_ops sb1250_pci_ops = { .read = sb1250_pcibios_read, .write = sb1250_pcibios_write, }; diff -urNp linux-2.6.39.1/arch/mips/pci/pci-vr41xx.c linux-2.6.39.1/arch/mips/pci/pci-vr41xx.c --- linux-2.6.39.1/arch/mips/pci/pci-vr41xx.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pci-vr41xx.c 2011-05-22 19:36:30.000000000 -0400 @@ -36,7 +36,7 @@ #include "pci-vr41xx.h" -extern struct pci_ops vr41xx_pci_ops; +extern const struct pci_ops vr41xx_pci_ops; static void __iomem *pciu_base; diff -urNp linux-2.6.39.1/arch/mips/pci/pci-yosemite.c linux-2.6.39.1/arch/mips/pci/pci-yosemite.c --- linux-2.6.39.1/arch/mips/pci/pci-yosemite.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pci/pci-yosemite.c 2011-05-22 19:36:30.000000000 -0400 @@ -11,7 +11,7 @@ #include #include -extern struct pci_ops titan_pci_ops; +extern const struct pci_ops titan_pci_ops; static struct resource py_mem_resource = { .start = 0xe0000000UL, diff -urNp linux-2.6.39.1/arch/mips/pmc-sierra/yosemite/ht.c linux-2.6.39.1/arch/mips/pmc-sierra/yosemite/ht.c --- linux-2.6.39.1/arch/mips/pmc-sierra/yosemite/ht.c 2011-05-19 
00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pmc-sierra/yosemite/ht.c 2011-05-22 19:36:30.000000000 -0400 @@ -366,7 +366,7 @@ resource_size_t pcibios_align_resource(v return start; } -struct pci_ops titan_pci_ops = { +const struct pci_ops titan_pci_ops = { titan_ht_config_read_byte, titan_ht_config_read_word, titan_ht_config_read_dword, diff -urNp linux-2.6.39.1/arch/mips/pnx8550/common/pci.c linux-2.6.39.1/arch/mips/pnx8550/common/pci.c --- linux-2.6.39.1/arch/mips/pnx8550/common/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/pnx8550/common/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -40,7 +40,7 @@ static struct resource pci_mem_resource .flags = IORESOURCE_MEM }; -extern struct pci_ops pnx8550_pci_ops; +extern const struct pci_ops pnx8550_pci_ops; static struct pci_controller pnx8550_controller = { .pci_ops = &pnx8550_pci_ops, diff -urNp linux-2.6.39.1/arch/mips/sni/pcimt.c linux-2.6.39.1/arch/mips/sni/pcimt.c --- linux-2.6.39.1/arch/mips/sni/pcimt.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/sni/pcimt.c 2011-05-22 19:36:30.000000000 -0400 @@ -183,7 +183,7 @@ static void __init sni_pcimt_resource_in request_resource(&sni_mem_resource, pcimt_mem_resources + i); } -extern struct pci_ops sni_pcimt_ops; +extern const struct pci_ops sni_pcimt_ops; static struct pci_controller sni_controller = { .pci_ops = &sni_pcimt_ops, diff -urNp linux-2.6.39.1/arch/mips/sni/pcit.c linux-2.6.39.1/arch/mips/sni/pcit.c --- linux-2.6.39.1/arch/mips/sni/pcit.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/sni/pcit.c 2011-05-22 19:36:30.000000000 -0400 @@ -145,7 +145,7 @@ static void __init sni_pcit_resource_ini } -extern struct pci_ops sni_pcit_ops; +extern const struct pci_ops sni_pcit_ops; static struct pci_controller sni_pcit_controller = { .pci_ops = &sni_pcit_ops, diff -urNp linux-2.6.39.1/arch/mips/wrppmc/pci.c linux-2.6.39.1/arch/mips/wrppmc/pci.c --- linux-2.6.39.1/arch/mips/wrppmc/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mips/wrppmc/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -14,7 +14,7 @@ #include -extern struct pci_ops gt64xxx_pci0_ops; +extern const struct pci_ops gt64xxx_pci0_ops; static struct resource pci0_io_resource = { .name = "pci_0 io", diff -urNp linux-2.6.39.1/arch/mn10300/unit-asb2305/pci-asb2305.h linux-2.6.39.1/arch/mn10300/unit-asb2305/pci-asb2305.h --- linux-2.6.39.1/arch/mn10300/unit-asb2305/pci-asb2305.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mn10300/unit-asb2305/pci-asb2305.h 2011-05-22 19:36:30.000000000 -0400 @@ -39,7 +39,7 @@ extern void pcibios_resource_survey(void extern int pcibios_last_bus; extern struct pci_bus *pci_root_bus; -extern struct pci_ops *pci_root_ops; +extern const struct pci_ops *pci_root_ops; extern struct irq_routing_table *pcibios_get_irq_routing_table(void); extern int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); diff -urNp linux-2.6.39.1/arch/mn10300/unit-asb2305/pci.c linux-2.6.39.1/arch/mn10300/unit-asb2305/pci.c --- linux-2.6.39.1/arch/mn10300/unit-asb2305/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/mn10300/unit-asb2305/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -24,7 +24,7 @@ unsigned int pci_probe = 1; int pcibios_last_bus = -1; struct pci_bus *pci_root_bus; -struct pci_ops *pci_root_ops; +const struct pci_ops *pci_root_ops; /* * The accessible PCI window does not cover the entire CPU address space, but @@ -274,7 +274,7 @@ static int pci_ampci_write_config(struct } } -static struct 
pci_ops pci_direct_ampci = { +static const struct pci_ops pci_direct_ampci = { pci_ampci_read_config, pci_ampci_write_config, }; @@ -289,7 +289,7 @@ static struct pci_ops pci_direct_ampci = * This should be close to trivial, but it isn't, because there are buggy * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. */ -static int __init pci_sanity_check(struct pci_ops *o) +static int __init pci_sanity_check(const struct pci_ops *o) { struct pci_bus bus; /* Fake bus and device */ u32 x; diff -urNp linux-2.6.39.1/arch/parisc/include/asm/elf.h linux-2.6.39.1/arch/parisc/include/asm/elf.h --- linux-2.6.39.1/arch/parisc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/parisc/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration.. #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x10000UL + +#define PAX_DELTA_MMAP_LEN 16 +#define PAX_DELTA_STACK_LEN 16 +#endif + /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, but it's not easy, and we've already done it here. */ diff -urNp linux-2.6.39.1/arch/parisc/include/asm/pgtable.h linux-2.6.39.1/arch/parisc/include/asm/pgtable.h --- linux-2.6.39.1/arch/parisc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/parisc/include/asm/pgtable.h 2011-05-22 19:36:30.000000000 -0400 @@ -207,6 +207,17 @@ struct vm_area_struct; #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) #define PAGE_COPY PAGE_EXECREAD #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) + +#ifdef CONFIG_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE) #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) diff -urNp linux-2.6.39.1/arch/parisc/kernel/module.c linux-2.6.39.1/arch/parisc/kernel/module.c --- linux-2.6.39.1/arch/parisc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/parisc/kernel/module.c 2011-05-22 19:36:30.000000000 -0400 @@ -96,16 +96,38 @@ /* three functions to determine where in the module core * or init pieces the location is */ +static inline int in_init_rx(struct module *me, void *loc) +{ + return (loc >= me->module_init_rx && + loc < (me->module_init_rx + me->init_size_rx)); +} + +static inline int in_init_rw(struct module *me, void *loc) +{ + return (loc >= me->module_init_rw && + loc < (me->module_init_rw + me->init_size_rw)); +} + static inline int in_init(struct module *me, void *loc) { - return (loc >= me->module_init && - loc <= (me->module_init + me->init_size)); + return in_init_rx(me, loc) || in_init_rw(me, loc); +} + +static inline int in_core_rx(struct module *me, void *loc) +{ + return (loc >= me->module_core_rx && + loc < (me->module_core_rx + me->core_size_rx)); +} + +static inline int in_core_rw(struct module *me, void *loc) +{ 
+ return (loc >= me->module_core_rw && + loc < (me->module_core_rw + me->core_size_rw)); } static inline int in_core(struct module *me, void *loc) { - return (loc >= me->module_core && - loc <= (me->module_core + me->core_size)); + return in_core_rx(me, loc) || in_core_rw(me, loc); } static inline int in_local(struct module *me, void *loc) @@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_ } /* align things a bit */ - me->core_size = ALIGN(me->core_size, 16); - me->arch.got_offset = me->core_size; - me->core_size += gots * sizeof(struct got_entry); - - me->core_size = ALIGN(me->core_size, 16); - me->arch.fdesc_offset = me->core_size; - me->core_size += fdescs * sizeof(Elf_Fdesc); + me->core_size_rw = ALIGN(me->core_size_rw, 16); + me->arch.got_offset = me->core_size_rw; + me->core_size_rw += gots * sizeof(struct got_entry); + + me->core_size_rw = ALIGN(me->core_size_rw, 16); + me->arch.fdesc_offset = me->core_size_rw; + me->core_size_rw += fdescs * sizeof(Elf_Fdesc); me->arch.got_max = gots; me->arch.fdesc_max = fdescs; @@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module BUG_ON(value == 0); - got = me->module_core + me->arch.got_offset; + got = me->module_core_rw + me->arch.got_offset; for (i = 0; got[i].addr; i++) if (got[i].addr == value) goto out; @@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module #ifdef CONFIG_64BIT static Elf_Addr get_fdesc(struct module *me, unsigned long value) { - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset; if (!value) { printk(KERN_ERR "%s: zero OPD requested!\n", me->name); @@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module /* Create new one */ fdesc->addr = value; - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; return (Elf_Addr)fdesc; } #endif /* CONFIG_64BIT */ @@ -849,7 +871,7 @@ register_unwind_table(struct module *me, table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; end = table + sechdrs[me->arch.unwind_section].sh_size; - gp = (Elf_Addr)me->module_core + me->arch.got_offset; + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", me->arch.unwind_section, table, end, gp); diff -urNp linux-2.6.39.1/arch/parisc/kernel/sys_parisc.c linux-2.6.39.1/arch/parisc/kernel/sys_parisc.c --- linux-2.6.39.1/arch/parisc/kernel/sys_parisc.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/parisc/kernel/sys_parisc.c 2011-05-22 19:36:30.000000000 -0400 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u /* At this point: (!vma || addr < vma->vm_end). */ if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) + if (check_heap_stack_gap(vma, addr, len)) return addr; addr = vma->vm_end; } @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str /* At this point: (!vma || addr < vma->vm_end). 
*/ if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vma || addr + len <= vma->vm_start) + if (check_heap_stack_gap(vma, addr, len)) return addr; addr = DCACHE_ALIGN(vma->vm_end - offset) + offset; if (addr < vma->vm_end) /* handle wraparound */ @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str if (flags & MAP_FIXED) return addr; if (!addr) - addr = TASK_UNMAPPED_BASE; + addr = current->mm->mmap_base; if (filp) { addr = get_shared_area(filp->f_mapping, addr, len, pgoff); diff -urNp linux-2.6.39.1/arch/parisc/kernel/traps.c linux-2.6.39.1/arch/parisc/kernel/traps.c --- linux-2.6.39.1/arch/parisc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/parisc/kernel/traps.c 2011-05-22 19:36:30.000000000 -0400 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod down_read(&current->mm->mmap_sem); vma = find_vma(current->mm,regs->iaoq[0]); - if (vma && (regs->iaoq[0] >= vma->vm_start) - && (vma->vm_flags & VM_EXEC)) { - + if (vma && (regs->iaoq[0] >= vma->vm_start)) { fault_address = regs->iaoq[0]; fault_space = regs->iasq[0]; diff -urNp linux-2.6.39.1/arch/parisc/mm/fault.c linux-2.6.39.1/arch/parisc/mm/fault.c --- linux-2.6.39.1/arch/parisc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/parisc/mm/fault.c 2011-05-22 19:36:30.000000000 -0400 @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex static unsigned long parisc_acctyp(unsigned long code, unsigned int inst) { - if (code == 6 || code == 16) + if (code == 6 || code == 7 || code == 16) return VM_EXEC; switch (inst & 0xf0000000) { @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign } #endif +#ifdef CONFIG_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address) + * + * returns 1 when task should be killed + * 2 when rt_sigreturn trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: unpatched PLT emulation */ + unsigned int bl, depwi; + + err = get_user(bl, (unsigned int *)instruction_pointer(regs)); + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4)); + + if (err) + break; + + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) { + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12; + + err = get_user(ldw, (unsigned int *)addr); + err |= get_user(bv, (unsigned int *)(addr+4)); + err |= get_user(ldw2, (unsigned int *)(addr+8)); + + if (err) + break; + + if (ldw == 0x0E801096U && + bv == 0xEAC0C000U && + ldw2 == 0x0E881095U) + { + unsigned int resolver, map; + + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8)); + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12)); + if (err) + break; + + regs->gr[20] = instruction_pointer(regs)+8; + regs->gr[21] = map; + regs->gr[22] = resolver; + regs->iaoq[0] = resolver | 3UL; + regs->iaoq[1] = regs->iaoq[0] + 4; + return 3; + } + } + } while (0); +#endif + +#ifdef CONFIG_PAX_EMUTRAMP + +#ifndef CONFIG_PAX_EMUSIGRT + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) + return 1; +#endif + + do { /* PaX: rt_sigreturn emulation */ + unsigned int ldi1, ldi2, bel, nop; + + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs)); + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4)); + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8)); + err |= get_user(nop, (unsigned int
*)(instruction_pointer(regs)+12)); + + if (err) + break; + + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) && + ldi2 == 0x3414015AU && + bel == 0xE4008200U && + nop == 0x08000240U) + { + regs->gr[25] = (ldi1 & 2) >> 1; + regs->gr[20] = __NR_rt_sigreturn; + regs->gr[31] = regs->iaoq[1] + 16; + regs->sr[0] = regs->iasq[1]; + regs->iaoq[0] = 0x100UL; + regs->iaoq[1] = regs->iaoq[0] + 4; + regs->iasq[0] = regs->sr[2]; + regs->iasq[1] = regs->sr[2]; + return 2; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *fix; @@ -192,8 +303,33 @@ good_area: acc_type = parisc_acctyp(code,regs->iir); - if ((vma->vm_flags & acc_type) != acc_type) + if ((vma->vm_flags & acc_type) != acc_type) { + +#ifdef CONFIG_PAX_PAGEEXEC + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) && + (address & ~3UL) == instruction_pointer(regs)) + { + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 3: + return; +#endif + +#ifdef CONFIG_PAX_EMUTRAMP + case 2: + return; +#endif + + } + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]); + do_group_exit(SIGKILL); + } +#endif + goto bad_area; + } /* * If for any reason at all we couldn't handle the fault, make diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/device.h linux-2.6.39.1/arch/powerpc/include/asm/device.h --- linux-2.6.39.1/arch/powerpc/include/asm/device.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/device.h 2011-05-22 19:36:30.000000000 -0400 @@ -17,7 +17,7 @@ struct device_node; */ struct dev_archdata { /* DMA operations on that device */ - struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; /* * When an iommu is in use, dma_data is used as a ptr to the base of the diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/dma-mapping.h linux-2.6.39.1/arch/powerpc/include/asm/dma-mapping.h --- linux-2.6.39.1/arch/powerpc/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400 @@ -67,12 +67,13 @@ static inline unsigned long device_to_ma /* * Available generic sets of operations */ +/* cannot be const */ #ifdef CONFIG_PPC64 -extern struct dma_map_ops dma_iommu_ops; +extern const struct dma_map_ops dma_iommu_ops; #endif -extern struct dma_map_ops dma_direct_ops; +extern const struct dma_map_ops dma_direct_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { /* We don't handle the NULL dev case for ISA for now. We could * do it via an out of line call but it is not needed for now. 
The @@ -85,7 +86,7 @@ static inline struct dma_map_ops *get_dm return dev->archdata.dma_ops; } -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops) { dev->archdata.dma_ops = ops; } @@ -119,7 +120,7 @@ static inline void set_dma_offset(struct static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); if (unlikely(dma_ops == NULL)) return 0; @@ -133,7 +134,7 @@ extern int dma_set_mask(struct device *d static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); void *cpu_addr; BUG_ON(!dma_ops); @@ -148,7 +149,7 @@ static inline void *dma_alloc_coherent(s static inline void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); BUG_ON(!dma_ops); @@ -159,7 +160,7 @@ static inline void dma_free_coherent(str static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); if (dma_ops->mapping_error) return dma_ops->mapping_error(dev, dma_addr); diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/elf.h linux-2.6.39.1/arch/powerpc/include/asm/elf.h --- linux-2.6.39.1/arch/powerpc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -extern unsigned long randomize_et_dyn(unsigned long base); -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000)) +#define ELF_ET_DYN_BASE (0x20000000) + +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (0x10000000UL) + +#ifdef __powerpc64__ +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28) +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 
16 : 28) +#else +#define PAX_DELTA_MMAP_LEN 15 +#define PAX_DELTA_STACK_LEN 15 +#endif +#endif /* * Our registers are always unsigned longs, whether we're a 32 bit @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s (0x7ff >> (PAGE_SHIFT - 12)) : \ (0x3ffff >> (PAGE_SHIFT - 12))) -extern unsigned long arch_randomize_brk(struct mm_struct *mm); -#define arch_randomize_brk arch_randomize_brk - #endif /* __KERNEL__ */ /* diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/iommu.h linux-2.6.39.1/arch/powerpc/include/asm/iommu.h --- linux-2.6.39.1/arch/powerpc/include/asm/iommu.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/iommu.h 2011-05-22 19:36:30.000000000 -0400 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi extern void iommu_init_early_dart(void); extern void iommu_init_early_pasemi(void); +/* dma-iommu.c */ +extern int dma_iommu_dma_supported(struct device *dev, u64 mask); + #ifdef CONFIG_PCI extern void pci_iommu_init(void); extern void pci_direct_iommu_init(void); diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/kmap_types.h linux-2.6.39.1/arch/powerpc/include/asm/kmap_types.h --- linux-2.6.39.1/arch/powerpc/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/kmap_types.h 2011-05-22 19:36:30.000000000 -0400 @@ -27,6 +27,7 @@ enum km_type { KM_PPC_SYNC_PAGE, KM_PPC_SYNC_ICACHE, KM_KDB, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/page_64.h linux-2.6.39.1/arch/powerpc/include/asm/page_64.h --- linux-2.6.39.1/arch/powerpc/include/asm/page_64.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/page_64.h 2011-05-22 19:36:30.000000000 -0400 @@ -172,15 +172,18 @@ do { \ * stack by default, so in the absence of a PT_GNU_STACK program header * we turn execute permission off. */ -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_STACK_DEFAULT_FLAGS32 \ + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#ifndef CONFIG_PAX_PAGEEXEC #define VM_STACK_DEFAULT_FLAGS \ (is_32bit_task() ? \ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) +#endif #include diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/page.h linux-2.6.39.1/arch/powerpc/include/asm/page.h --- linux-2.6.39.1/arch/powerpc/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/page.h 2011-05-22 19:36:30.000000000 -0400 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr; * and needs to be executable. This means the whole heap ends * up being executable. */ -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS32 \ + (((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr; #define is_kernel_addr(x) ((x) >= PAGE_OFFSET) #endif +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) + #ifndef __ASSEMBLY__ #undef STRICT_MM_TYPECHECKS diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/pci.h linux-2.6.39.1/arch/powerpc/include/asm/pci.h --- linux-2.6.39.1/arch/powerpc/include/asm/pci.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/pci.h 2011-05-22 19:36:30.000000000 -0400 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq } #ifdef CONFIG_PCI -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); -extern struct dma_map_ops *get_pci_dma_ops(void); +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops); +extern const struct dma_map_ops *get_pci_dma_ops(void); #else /* CONFIG_PCI */ #define set_pci_dma_ops(d) #define get_pci_dma_ops() NULL diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/pgtable.h linux-2.6.39.1/arch/powerpc/include/asm/pgtable.h --- linux-2.6.39.1/arch/powerpc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/pgtable.h 2011-05-22 19:36:30.000000000 -0400 @@ -2,6 +2,7 @@ #define _ASM_POWERPC_PGTABLE_H #ifdef __KERNEL__ +#include #ifndef __ASSEMBLY__ #include /* For TASK_SIZE */ #include diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/pte-hash32.h linux-2.6.39.1/arch/powerpc/include/asm/pte-hash32.h --- linux-2.6.39.1/arch/powerpc/include/asm/pte-hash32.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/pte-hash32.h 2011-05-22 19:36:30.000000000 -0400 @@ -21,6 +21,7 @@ #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ #define _PAGE_USER 0x004 /* usermode access allowed */ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ +#define _PAGE_EXEC _PAGE_GUARDED #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/reg.h linux-2.6.39.1/arch/powerpc/include/asm/reg.h --- linux-2.6.39.1/arch/powerpc/include/asm/reg.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/reg.h 2011-05-22 19:36:30.000000000 -0400 @@ -201,6 +201,7 @@ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ #define DSISR_NOHPTE 0x40000000 /* no translation found */ +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */ #define DSISR_PROTFAULT 0x08000000 /* protection fault */ #define DSISR_ISSTORE 0x02000000 /* access was a store */ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/swiotlb.h linux-2.6.39.1/arch/powerpc/include/asm/swiotlb.h --- linux-2.6.39.1/arch/powerpc/include/asm/swiotlb.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/swiotlb.h 2011-05-22 19:36:30.000000000 -0400 @@ -13,7 +13,7 @@ #include -extern struct dma_map_ops swiotlb_dma_ops; +extern const struct dma_map_ops swiotlb_dma_ops; static inline void dma_mark_clean(void *addr, size_t size) {} diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/system.h linux-2.6.39.1/arch/powerpc/include/asm/system.h --- 
linux-2.6.39.1/arch/powerpc/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/system.h 2011-05-22 19:36:30.000000000 -0400 @@ -533,7 +533,7 @@ __cmpxchg_local(volatile void *ptr, unsi #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) #endif -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) ((x) & ~0xfUL) /* Used in very early kernel initialization. */ extern unsigned long reloc_offset(void); diff -urNp linux-2.6.39.1/arch/powerpc/include/asm/uaccess.h linux-2.6.39.1/arch/powerpc/include/asm/uaccess.h --- linux-2.6.39.1/arch/powerpc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/include/asm/uaccess.h 2011-05-22 19:36:30.000000000 -0400 @@ -13,6 +13,8 @@ #define VERIFY_READ 0 #define VERIFY_WRITE 1 +extern void check_object_size(const void *ptr, unsigned long n, bool to); + /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with @@ -327,52 +329,6 @@ do { \ extern unsigned long __copy_tofrom_user(void __user *to, const void __user *from, unsigned long size); -#ifndef __powerpc64__ - -static inline unsigned long copy_from_user(void *to, - const void __user *from, unsigned long n) -{ - unsigned long over; - - if (access_ok(VERIFY_READ, from, n)) - return __copy_tofrom_user((__force void __user *)to, from, n); - if ((unsigned long)from < TASK_SIZE) { - over = (unsigned long)from + n - TASK_SIZE; - return __copy_tofrom_user((__force void __user *)to, from, - n - over) + over; - } - return n; -} - -static inline unsigned long copy_to_user(void __user *to, - const void *from, unsigned long n) -{ - unsigned long over; - - if (access_ok(VERIFY_WRITE, to, n)) - return __copy_tofrom_user(to, (__force void __user *)from, n); - if ((unsigned long)to < TASK_SIZE) { - over = (unsigned long)to + n - TASK_SIZE; - return __copy_tofrom_user(to, (__force void __user *)from, - n - over) + over; - } - return n; -} - -#else /* __powerpc64__ */ - -#define __copy_in_user(to, from, size) \ - __copy_tofrom_user((to), (from), (size)) - -extern unsigned long copy_from_user(void *to, const void __user *from, - unsigned long n); -extern unsigned long copy_to_user(void __user *to, const void *from, - unsigned long n); -extern unsigned long copy_in_user(void __user *to, const void __user *from, - unsigned long n); - -#endif /* __powerpc64__ */ - static inline unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_ if (ret == 0) return 0; } + + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); + return __copy_tofrom_user((__force void __user *)to, from, n); } @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us if (ret == 0) return 0; } + + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); + return __copy_tofrom_user(to, (__force const void __user *)from, n); } @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us return __copy_to_user_inatomic(to, from, size); } +#ifndef __powerpc64__ + +static inline unsigned long __must_check copy_from_user(void *to, + const void __user *from, unsigned long n) +{ + unsigned long over; + + if ((long)n < 0) + return n; + + if (access_ok(VERIFY_READ, from, n)) { + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); + return __copy_tofrom_user((__force void __user *)to, from, n); + } + if 
((unsigned long)from < TASK_SIZE) { + over = (unsigned long)from + n - TASK_SIZE; + if (!__builtin_constant_p(n - over)) + check_object_size(to, n - over, false); + return __copy_tofrom_user((__force void __user *)to, from, + n - over) + over; + } + return n; +} + +static inline unsigned long __must_check copy_to_user(void __user *to, + const void *from, unsigned long n) +{ + unsigned long over; + + if ((long)n < 0) + return n; + + if (access_ok(VERIFY_WRITE, to, n)) { + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); + return __copy_tofrom_user(to, (__force void __user *)from, n); + } + if ((unsigned long)to < TASK_SIZE) { + over = (unsigned long)to + n - TASK_SIZE; + if (!__builtin_constant_p(n)) + check_object_size(from, n - over, true); + return __copy_tofrom_user(to, (__force void __user *)from, + n - over) + over; + } + return n; +} + +#else /* __powerpc64__ */ + +#define __copy_in_user(to, from, size) \ + __copy_tofrom_user((to), (from), (size)) + +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) +{ + if ((long)n < 0 || n > INT_MAX) + return n; + + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); + + if (likely(access_ok(VERIFY_READ, from, n))) + n = __copy_from_user(to, from, n); + else + memset(to, 0, n); + return n; +} + +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) +{ + if ((long)n < 0 || n > INT_MAX) + return n; + + if (likely(access_ok(VERIFY_WRITE, to, n))) { + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); + n = __copy_to_user(to, from, n); + } + return n; +} + +extern unsigned long copy_in_user(void __user *to, const void __user *from, + unsigned long n); + +#endif /* __powerpc64__ */ + extern unsigned long __clear_user(void __user *addr, unsigned long size); static inline unsigned long clear_user(void __user *addr, unsigned long size) diff -urNp linux-2.6.39.1/arch/powerpc/kernel/dma.c linux-2.6.39.1/arch/powerpc/kernel/dma.c --- linux-2.6.39.1/arch/powerpc/kernel/dma.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/dma.c 2011-05-22 19:36:30.000000000 -0400 @@ -136,7 +136,7 @@ static inline void dma_direct_sync_singl } #endif -struct dma_map_ops dma_direct_ops = { +const struct dma_map_ops dma_direct_ops = { .alloc_coherent = dma_direct_alloc_coherent, .free_coherent = dma_direct_free_coherent, .map_sg = dma_direct_map_sg, @@ -157,7 +157,7 @@ EXPORT_SYMBOL(dma_direct_ops); int dma_set_mask(struct device *dev, u64 dma_mask) { - struct dma_map_ops *dma_ops = get_dma_ops(dev); + const struct dma_map_ops *dma_ops = get_dma_ops(dev); if (ppc_md.dma_set_mask) return ppc_md.dma_set_mask(dev, dma_mask); diff -urNp linux-2.6.39.1/arch/powerpc/kernel/dma-iommu.c linux-2.6.39.1/arch/powerpc/kernel/dma-iommu.c --- linux-2.6.39.1/arch/powerpc/kernel/dma-iommu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/dma-iommu.c 2011-05-22 19:36:30.000000000 -0400 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de } /* We support DMA to/from any memory page via the iommu */ -static int dma_iommu_dma_supported(struct device *dev, u64 mask) +int dma_iommu_dma_supported(struct device *dev, u64 mask) { struct iommu_table *tbl = get_iommu_table_base(dev); @@ -90,7 +90,7 @@ static int dma_iommu_dma_supported(struc return 1; } -struct dma_map_ops dma_iommu_ops = { +struct dma_map_ops dma_iommu_ops = { /* cannot be const, see arch/powerpc/platforms/cell/iommu.c */ 
.alloc_coherent = dma_iommu_alloc_coherent, .free_coherent = dma_iommu_free_coherent, .map_sg = dma_iommu_map_sg, diff -urNp linux-2.6.39.1/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.39.1/arch/powerpc/kernel/dma-swiotlb.c --- linux-2.6.39.1/arch/powerpc/kernel/dma-swiotlb.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/dma-swiotlb.c 2011-05-22 19:36:30.000000000 -0400 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable; * map_page, and unmap_page on highmem, use normal dma_ops * for everything else. */ -struct dma_map_ops swiotlb_dma_ops = { +const struct dma_map_ops swiotlb_dma_ops = { .alloc_coherent = dma_direct_alloc_coherent, .free_coherent = dma_direct_free_coherent, .map_sg = swiotlb_map_sg_attrs, diff -urNp linux-2.6.39.1/arch/powerpc/kernel/exceptions-64e.S linux-2.6.39.1/arch/powerpc/kernel/exceptions-64e.S --- linux-2.6.39.1/arch/powerpc/kernel/exceptions-64e.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/exceptions-64e.S 2011-05-22 19:36:30.000000000 -0400 @@ -495,6 +495,7 @@ storage_fault_common: std r14,_DAR(r1) std r15,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD + bl .save_nvgprs mr r4,r14 mr r5,r15 ld r14,PACA_EXGEN+EX_R14(r13) @@ -504,8 +505,7 @@ storage_fault_common: cmpdi r3,0 bne- 1f b .ret_from_except_lite -1: bl .save_nvgprs - mr r5,r3 +1: mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD ld r4,_DAR(r1) bl .bad_page_fault diff -urNp linux-2.6.39.1/arch/powerpc/kernel/exceptions-64s.S linux-2.6.39.1/arch/powerpc/kernel/exceptions-64s.S --- linux-2.6.39.1/arch/powerpc/kernel/exceptions-64s.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/exceptions-64s.S 2011-05-22 19:36:30.000000000 -0400 @@ -848,10 +848,10 @@ handle_page_fault: 11: ld r4,_DAR(r1) ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD + bl .save_nvgprs bl .do_page_fault cmpdi r3,0 beq+ 13f - bl .save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD lwz r4,_DAR(r1) diff -urNp linux-2.6.39.1/arch/powerpc/kernel/ibmebus.c linux-2.6.39.1/arch/powerpc/kernel/ibmebus.c --- linux-2.6.39.1/arch/powerpc/kernel/ibmebus.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/ibmebus.c 2011-05-22 19:36:30.000000000 -0400 @@ -128,7 +128,7 @@ static int ibmebus_dma_supported(struct return 1; } -static struct dma_map_ops ibmebus_dma_ops = { +static const struct dma_map_ops ibmebus_dma_ops = { .alloc_coherent = ibmebus_alloc_coherent, .free_coherent = ibmebus_free_coherent, .map_sg = ibmebus_map_sg, diff -urNp linux-2.6.39.1/arch/powerpc/kernel/kgdb.c linux-2.6.39.1/arch/powerpc/kernel/kgdb.c --- linux-2.6.39.1/arch/powerpc/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400 @@ -422,7 +422,7 @@ int kgdb_arch_handle_exception(int vecto /* * Global data */ -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08}, }; diff -urNp linux-2.6.39.1/arch/powerpc/kernel/module_32.c linux-2.6.39.1/arch/powerpc/kernel/module_32.c --- linux-2.6.39.1/arch/powerpc/kernel/module_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/module_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr me->arch.core_plt_section = i; } if (!me->arch.core_plt_section || !me->arch.init_plt_section) { - printk("Module doesn't contain .plt or .init.plt sections.\n"); + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name); 
return -ENOEXEC; } @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); /* Init, or core PLT? */ - if (location >= mod->module_core - && location < mod->module_core + mod->core_size) + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) || + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw)) entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; - else + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) || + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw)) entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; + else { + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name); + return ~0UL; + } /* Find this entry, or if that fails, the next avail. entry */ while (entry->jump[0]) { diff -urNp linux-2.6.39.1/arch/powerpc/kernel/module.c linux-2.6.39.1/arch/powerpc/kernel/module.c --- linux-2.6.39.1/arch/powerpc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/module.c 2011-05-22 19:36:30.000000000 -0400 @@ -31,11 +31,24 @@ LIST_HEAD(module_bug_list); +#ifdef CONFIG_PAX_KERNEXEC void *module_alloc(unsigned long size) { if (size == 0) return NULL; + return vmalloc(size); +} + +void *module_alloc_exec(unsigned long size) +#else +void *module_alloc(unsigned long size) +#endif + +{ + if (size == 0) + return NULL; + return vmalloc_exec(size); } @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi vfree(module_region); } +#ifdef CONFIG_PAX_KERNEXEC +void module_free_exec(struct module *mod, void *module_region) +{ + module_free(mod, module_region); +} +#endif + static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, const char *name) diff -urNp linux-2.6.39.1/arch/powerpc/kernel/pci-common.c linux-2.6.39.1/arch/powerpc/kernel/pci-common.c --- linux-2.6.39.1/arch/powerpc/kernel/pci-common.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/pci-common.c 2011-05-22 19:36:30.000000000 -0400 @@ -53,14 +53,14 @@ resource_size_t isa_mem_base; unsigned int ppc_pci_flags = 0; -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops; -void set_pci_dma_ops(struct dma_map_ops *dma_ops) +void set_pci_dma_ops(const struct dma_map_ops *dma_ops) { pci_dma_ops = dma_ops; } -struct dma_map_ops *get_pci_dma_ops(void) +const struct dma_map_ops *get_pci_dma_ops(void) { return pci_dma_ops; } @@ -1639,7 +1639,7 @@ null_write_config(struct pci_bus *bus, u return PCIBIOS_DEVICE_NOT_FOUND; } -static struct pci_ops null_pci_ops = +static const struct pci_ops null_pci_ops = { .read = null_read_config, .write = null_write_config, diff -urNp linux-2.6.39.1/arch/powerpc/kernel/process.c linux-2.6.39.1/arch/powerpc/kernel/process.c --- linux-2.6.39.1/arch/powerpc/kernel/process.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/process.c 2011-05-22 19:41:32.000000000 -0400 @@ -655,8 +655,8 @@ void show_regs(struct pt_regs * regs) * Lookup NIP late so we have the best change of getting the * above info out without failing */ - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip); + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link); 
#endif show_stack(current, (unsigned long *) regs->gpr[1]); if (!user_mode(regs)) @@ -1146,10 +1146,10 @@ void show_stack(struct task_struct *tsk, newsp = stack[0]; ip = stack[STACK_FRAME_LR_SAVE]; if (!firstframe || ip != lr) { - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if ((ip == rth || ip == mrth) && curr_frame >= 0) { - printk(" (%pS)", + printk(" (%pA)", (void *)current->ret_stack[curr_frame].ret); curr_frame--; } @@ -1169,7 +1169,7 @@ void show_stack(struct task_struct *tsk, struct pt_regs *regs = (struct pt_regs *) (sp + STACK_FRAME_OVERHEAD); lr = regs->link; - printk("--- Exception: %lx at %pS\n LR = %pS\n", + printk("--- Exception: %lx at %pA\n LR = %pA\n", regs->trap, (void *)regs->nip, (void *)lr); firstframe = 1; } @@ -1244,58 +1244,3 @@ void thread_info_cache_init(void) } #endif /* THREAD_SHIFT < PAGE_SHIFT */ - -unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() & ~PAGE_MASK; - return sp & ~0xf; -} - -static inline unsigned long brk_rnd(void) -{ - unsigned long rnd = 0; - - /* 8MB for 32bit, 1GB for 64bit */ - if (is_32bit_task()) - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); - else - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); - - return rnd << PAGE_SHIFT; -} - -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long base = mm->brk; - unsigned long ret; - -#ifdef CONFIG_PPC_STD_MMU_64 - /* - * If we are using 1TB segments and we are allowed to randomise - * the heap, we can put it above 1TB so it is backed by a 1TB - * segment. Otherwise the heap will be in the bottom 1TB - * which always uses 256MB segments and this may result in a - * performance penalty. 
- */ - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); -#endif - - ret = PAGE_ALIGN(base + brk_rnd()); - - if (ret < mm->brk) - return mm->brk; - - return ret; -} - -unsigned long randomize_et_dyn(unsigned long base) -{ - unsigned long ret = PAGE_ALIGN(base + brk_rnd()); - - if (ret < base) - return base; - - return ret; -} diff -urNp linux-2.6.39.1/arch/powerpc/kernel/rtas_pci.c linux-2.6.39.1/arch/powerpc/kernel/rtas_pci.c --- linux-2.6.39.1/arch/powerpc/kernel/rtas_pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/rtas_pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -149,7 +149,7 @@ static int rtas_pci_write_config(struct return PCIBIOS_DEVICE_NOT_FOUND; } -static struct pci_ops rtas_pci_ops = { +static const struct pci_ops rtas_pci_ops = { .read = rtas_pci_read_config, .write = rtas_pci_write_config, }; diff -urNp linux-2.6.39.1/arch/powerpc/kernel/signal_32.c linux-2.6.39.1/arch/powerpc/kernel/signal_32.c --- linux-2.6.39.1/arch/powerpc/kernel/signal_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/signal_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -858,7 +858,7 @@ int handle_rt_signal32(unsigned long sig /* Save user registers on the stack */ frame = &rt_sf->uc.uc_mcontext; addr = frame; - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { if (save_user_regs(regs, frame, 0, 1)) goto badframe; regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp; diff -urNp linux-2.6.39.1/arch/powerpc/kernel/signal_64.c linux-2.6.39.1/arch/powerpc/kernel/signal_64.c --- linux-2.6.39.1/arch/powerpc/kernel/signal_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/signal_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct current->thread.fpscr.val = 0; /* Set up to return from userspace. 
*/ - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp; } else { err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); diff -urNp linux-2.6.39.1/arch/powerpc/kernel/vdso.c linux-2.6.39.1/arch/powerpc/kernel/vdso.c --- linux-2.6.39.1/arch/powerpc/kernel/vdso.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/vdso.c 2011-05-22 19:36:30.000000000 -0400 @@ -36,6 +36,7 @@ #include #include #include +#include #include "setup.h" @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l vdso_base = VDSO32_MBASE; #endif - current->mm->context.vdso_base = 0; + current->mm->context.vdso_base = ~0UL; /* vDSO has a problem and was disabled, just don't "enable" it for the * process @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l vdso_base = get_unmapped_area(NULL, vdso_base, (vdso_pages << PAGE_SHIFT) + ((VDSO_ALIGNMENT - 1) & PAGE_MASK), - 0, 0); + 0, MAP_PRIVATE | MAP_EXECUTABLE); if (IS_ERR_VALUE(vdso_base)) { rc = vdso_base; goto fail_mmapsem; diff -urNp linux-2.6.39.1/arch/powerpc/kernel/vio.c linux-2.6.39.1/arch/powerpc/kernel/vio.c --- linux-2.6.39.1/arch/powerpc/kernel/vio.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/kernel/vio.c 2011-05-22 19:36:30.000000000 -0400 @@ -605,11 +605,12 @@ static int vio_dma_iommu_dma_supported(s return dma_iommu_ops.dma_supported(dev, mask); } -struct dma_map_ops vio_dma_mapping_ops = { +const struct dma_map_ops vio_dma_mapping_ops = { .alloc_coherent = vio_dma_iommu_alloc_coherent, .free_coherent = vio_dma_iommu_free_coherent, .map_sg = vio_dma_iommu_map_sg, .unmap_sg = vio_dma_iommu_unmap_sg, + .dma_supported = dma_iommu_dma_supported, .map_page = vio_dma_iommu_map_page, .unmap_page = vio_dma_iommu_unmap_page, .dma_supported = vio_dma_iommu_dma_supported, diff -urNp linux-2.6.39.1/arch/powerpc/lib/usercopy_64.c linux-2.6.39.1/arch/powerpc/lib/usercopy_64.c --- linux-2.6.39.1/arch/powerpc/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/lib/usercopy_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -9,22 +9,6 @@ #include #include -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) -{ - if (likely(access_ok(VERIFY_READ, from, n))) - n = __copy_from_user(to, from, n); - else - memset(to, 0, n); - return n; -} - -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) -{ - if (likely(access_ok(VERIFY_WRITE, to, n))) - n = __copy_to_user(to, from, n); - return n; -} - unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long n) { @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user * return n; } -EXPORT_SYMBOL(copy_from_user); -EXPORT_SYMBOL(copy_to_user); EXPORT_SYMBOL(copy_in_user); diff -urNp linux-2.6.39.1/arch/powerpc/mm/fault.c linux-2.6.39.1/arch/powerpc/mm/fault.c --- linux-2.6.39.1/arch/powerpc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/mm/fault.c 2011-05-22 19:36:30.000000000 -0400 @@ -31,6 +31,10 @@ #include #include #include +#include +#include +#include +#include #include #include @@ -42,6 +46,7 @@ #include #include #include +#include #ifdef CONFIG_KPROBES static inline int notify_page_fault(struct pt_regs *regs) @@ -65,6 +70,33 @@ static inline int notify_page_fault(stru } #endif +#ifdef CONFIG_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (regs->nip = fault address) + * + * 
returns 1 when task should be killed + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int __user *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + /* * Check whether the instruction at regs->nip is a store using * an update addressing form which will update r1. @@ -135,7 +167,7 @@ int __kprobes do_page_fault(struct pt_re * indicate errors in DSISR but can validly be set in SRR1. */ if (trap == 0x400) - error_code &= 0x48200000; + error_code &= 0x58200000; else is_write = error_code & DSISR_ISSTORE; #else @@ -258,7 +290,7 @@ good_area: * "undefined". Of those that can be set, this is the only * one which seems bad. */ - if (error_code & 0x10000000) + if (error_code & DSISR_GUARDED) /* Guarded storage error. */ goto bad_area; #endif /* CONFIG_8xx */ @@ -273,7 +305,7 @@ good_area: * processors use the same I/D cache coherency mechanism * as embedded. */ - if (error_code & DSISR_PROTFAULT) + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED)) goto bad_area; #endif /* CONFIG_PPC_STD_MMU */ @@ -342,6 +374,23 @@ bad_area: bad_area_nosemaphore: /* User mode accesses cause a SIGSEGV */ if (user_mode(regs)) { + +#ifdef CONFIG_PAX_PAGEEXEC + if (mm->pax_flags & MF_PAX_PAGEEXEC) { +#ifdef CONFIG_PPC_STD_MMU + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) { +#else + if (is_exec && regs->nip == address) { +#endif + switch (pax_handle_fetch_fault(regs)) { + } + + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]); + do_group_exit(SIGKILL); + } + } +#endif + _exception(SIGSEGV, regs, code, address); return 0; } diff -urNp linux-2.6.39.1/arch/powerpc/mm/mmap_64.c linux-2.6.39.1/arch/powerpc/mm/mmap_64.c --- linux-2.6.39.1/arch/powerpc/mm/mmap_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/mm/mmap_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str */ if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff -urNp linux-2.6.39.1/arch/powerpc/mm/slice.c linux-2.6.39.1/arch/powerpc/mm/slice.c --- linux-2.6.39.1/arch/powerpc/mm/slice.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/mm/slice.c 2011-05-22 19:36:30.000000000 -0400 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_ if ((mm->task_size - len) < addr) return 0; vma = find_vma(mm, addr); - return (!vma || (addr + len) <= vma->vm_start); + return check_heap_stack_gap(vma, addr, len); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) @@ -256,7 +256,7 @@ full_search: addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT); continue; } - if (!vma || addr + len <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr, len)) { /* * Remember the place where we stopped the search: */ @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top } } - addr = 
mm->mmap_base; - while (addr > len) { + if (mm->mmap_base < len) + addr = -ENOMEM; + else + addr = mm->mmap_base - len; + + while (!IS_ERR_VALUE(addr)) { /* Go down by chunk size */ - addr = _ALIGN_DOWN(addr - len, 1ul << pshift); + addr = _ALIGN_DOWN(addr, 1ul << pshift); /* Check for hit with different page size */ mask = slice_range_to_mask(addr, len); @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top * return with success: */ vma = find_vma(mm, addr); - if (!vma || (addr + len) <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr, len)) { /* remember the address as a hint for next time */ if (use_cache) mm->free_area_cache = addr; @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ - addr = vma->vm_start; + addr = skip_heap_stack_gap(vma, len); } /* @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un if (fixed && addr > (mm->task_size - len)) return -EINVAL; +#ifdef CONFIG_PAX_RANDMMAP + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP)) + addr = 0; +#endif + /* If hint, make sure it matches our alignment restrictions */ if (!fixed && addr) { addr = _ALIGN_UP(addr, 1ul << pshift); diff -urNp linux-2.6.39.1/arch/powerpc/platforms/52xx/efika.c linux-2.6.39.1/arch/powerpc/platforms/52xx/efika.c --- linux-2.6.39.1/arch/powerpc/platforms/52xx/efika.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/platforms/52xx/efika.c 2011-05-22 19:36:30.000000000 -0400 @@ -60,7 +60,7 @@ static int rtas_write_config(struct pci_ return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; } -static struct pci_ops rtas_pci_ops = { +static const struct pci_ops rtas_pci_ops = { .read = rtas_read_config, .write = rtas_write_config, }; diff -urNp linux-2.6.39.1/arch/powerpc/platforms/cell/celleb_pci.c linux-2.6.39.1/arch/powerpc/platforms/cell/celleb_pci.c --- linux-2.6.39.1/arch/powerpc/platforms/cell/celleb_pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/platforms/cell/celleb_pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -234,7 +234,7 @@ static int celleb_fake_pci_write_config( return PCIBIOS_SUCCESSFUL; } -static struct pci_ops celleb_fake_pci_ops = { +static const struct pci_ops celleb_fake_pci_ops = { .read = celleb_fake_pci_read_config, .write = celleb_fake_pci_write_config, }; diff -urNp linux-2.6.39.1/arch/powerpc/platforms/cell/celleb_scc_epci.c linux-2.6.39.1/arch/powerpc/platforms/cell/celleb_scc_epci.c --- linux-2.6.39.1/arch/powerpc/platforms/cell/celleb_scc_epci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/platforms/cell/celleb_scc_epci.c 2011-05-22 19:36:30.000000000 -0400 @@ -245,7 +245,7 @@ static int celleb_epci_write_config(stru return celleb_epci_check_abort(hose, addr); } -struct pci_ops celleb_epci_ops = { +const struct pci_ops celleb_epci_ops = { .read = celleb_epci_read_config, .write = celleb_epci_write_config, }; diff -urNp linux-2.6.39.1/arch/powerpc/platforms/cell/celleb_scc_pciex.c linux-2.6.39.1/arch/powerpc/platforms/cell/celleb_scc_pciex.c --- linux-2.6.39.1/arch/powerpc/platforms/cell/celleb_scc_pciex.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/platforms/cell/celleb_scc_pciex.c 2011-05-22 19:36:30.000000000 -0400 @@ -399,7 +399,7 @@ static int scc_pciex_write_config(struct return PCIBIOS_SUCCESSFUL; } -static struct pci_ops scc_pciex_pci_ops = { +static const struct pci_ops scc_pciex_pci_ops = { scc_pciex_read_config, scc_pciex_write_config, }; diff -urNp 
linux-2.6.39.1/arch/powerpc/platforms/cell/iommu.c linux-2.6.39.1/arch/powerpc/platforms/cell/iommu.c --- linux-2.6.39.1/arch/powerpc/platforms/cell/iommu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/platforms/cell/iommu.c 2011-05-22 19:36:30.000000000 -0400 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); -struct dma_map_ops dma_iommu_fixed_ops = { +const struct dma_map_ops dma_iommu_fixed_ops = { .alloc_coherent = dma_fixed_alloc_coherent, .free_coherent = dma_fixed_free_coherent, .map_sg = dma_fixed_map_sg, diff -urNp linux-2.6.39.1/arch/powerpc/platforms/chrp/pci.c linux-2.6.39.1/arch/powerpc/platforms/chrp/pci.c --- linux-2.6.39.1/arch/powerpc/platforms/chrp/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/platforms/chrp/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -84,7 +84,7 @@ int gg2_write_config(struct pci_bus *bus return PCIBIOS_SUCCESSFUL; } -static struct pci_ops gg2_pci_ops = +static const struct pci_ops gg2_pci_ops = { .read = gg2_read_config, .write = gg2_write_config, @@ -122,7 +122,7 @@ int rtas_write_config(struct pci_bus *bu return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; } -static struct pci_ops rtas_pci_ops = +static const struct pci_ops rtas_pci_ops = { .read = rtas_read_config, .write = rtas_write_config, diff -urNp linux-2.6.39.1/arch/powerpc/platforms/iseries/pci.c linux-2.6.39.1/arch/powerpc/platforms/iseries/pci.c --- linux-2.6.39.1/arch/powerpc/platforms/iseries/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/platforms/iseries/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -533,7 +533,7 @@ static int iSeries_pci_write_config(stru return 0; } -static struct pci_ops iSeries_pci_ops = { +static const struct pci_ops iSeries_pci_ops = { .read = iSeries_pci_read_config, .write = iSeries_pci_write_config }; diff -urNp linux-2.6.39.1/arch/powerpc/platforms/maple/pci.c linux-2.6.39.1/arch/powerpc/platforms/maple/pci.c --- linux-2.6.39.1/arch/powerpc/platforms/maple/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/platforms/maple/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -180,7 +180,7 @@ static int u3_agp_write_config(struct pc return PCIBIOS_SUCCESSFUL; } -static struct pci_ops u3_agp_pci_ops = +static const struct pci_ops u3_agp_pci_ops = { .read = u3_agp_read_config, .write = u3_agp_write_config, @@ -276,7 +276,7 @@ static int u3_ht_write_config(struct pci return PCIBIOS_SUCCESSFUL; } -static struct pci_ops u3_ht_pci_ops = +static const struct pci_ops u3_ht_pci_ops = { .read = u3_ht_read_config, .write = u3_ht_write_config, @@ -381,7 +381,7 @@ static int u4_pcie_write_config(struct p return PCIBIOS_SUCCESSFUL; } -static struct pci_ops u4_pcie_pci_ops = +static const struct pci_ops u4_pcie_pci_ops = { .read = u4_pcie_read_config, .write = u4_pcie_write_config, diff -urNp linux-2.6.39.1/arch/powerpc/platforms/pasemi/pci.c linux-2.6.39.1/arch/powerpc/platforms/pasemi/pci.c --- linux-2.6.39.1/arch/powerpc/platforms/pasemi/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/platforms/pasemi/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -176,7 +176,7 @@ static int pa_pxp_write_config(struct pc return PCIBIOS_SUCCESSFUL; } -static struct pci_ops pa_pxp_ops = { +static const struct pci_ops pa_pxp_ops = { .read = pa_pxp_read_config, .write = pa_pxp_write_config, }; diff -urNp linux-2.6.39.1/arch/powerpc/platforms/powermac/pci.c 
linux-2.6.39.1/arch/powerpc/platforms/powermac/pci.c --- linux-2.6.39.1/arch/powerpc/platforms/powermac/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/platforms/powermac/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -218,7 +218,7 @@ static int macrisc_write_config(struct p return PCIBIOS_SUCCESSFUL; } -static struct pci_ops macrisc_pci_ops = +static const struct pci_ops macrisc_pci_ops = { .read = macrisc_read_config, .write = macrisc_write_config, @@ -273,7 +273,7 @@ chaos_write_config(struct pci_bus *bus, return macrisc_write_config(bus, devfn, offset, len, val); } -static struct pci_ops chaos_pci_ops = +static const struct pci_ops chaos_pci_ops = { .read = chaos_read_config, .write = chaos_write_config, diff -urNp linux-2.6.39.1/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.39.1/arch/powerpc/platforms/ps3/system-bus.c --- linux-2.6.39.1/arch/powerpc/platforms/ps3/system-bus.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/platforms/ps3/system-bus.c 2011-05-22 19:36:30.000000000 -0400 @@ -695,7 +695,7 @@ static int ps3_dma_supported(struct devi return mask >= DMA_BIT_MASK(32); } -static struct dma_map_ops ps3_sb_dma_ops = { +static const struct dma_map_ops ps3_sb_dma_ops = { .alloc_coherent = ps3_alloc_coherent, .free_coherent = ps3_free_coherent, .map_sg = ps3_sb_map_sg, @@ -705,7 +705,7 @@ static struct dma_map_ops ps3_sb_dma_ops .unmap_page = ps3_unmap_page, }; -static struct dma_map_ops ps3_ioc0_dma_ops = { +static const struct dma_map_ops ps3_ioc0_dma_ops = { .alloc_coherent = ps3_alloc_coherent, .free_coherent = ps3_free_coherent, .map_sg = ps3_ioc0_map_sg, diff -urNp linux-2.6.39.1/arch/powerpc/sysdev/fsl_pci.c linux-2.6.39.1/arch/powerpc/sysdev/fsl_pci.c --- linux-2.6.39.1/arch/powerpc/sysdev/fsl_pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/sysdev/fsl_pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -573,7 +573,7 @@ static int mpc83xx_pcie_write_config(str return PCIBIOS_SUCCESSFUL; } -static struct pci_ops mpc83xx_pcie_ops = { +static const struct pci_ops mpc83xx_pcie_ops = { .read = mpc83xx_pcie_read_config, .write = mpc83xx_pcie_write_config, }; diff -urNp linux-2.6.39.1/arch/powerpc/sysdev/indirect_pci.c linux-2.6.39.1/arch/powerpc/sysdev/indirect_pci.c --- linux-2.6.39.1/arch/powerpc/sysdev/indirect_pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/sysdev/indirect_pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -148,7 +148,7 @@ indirect_write_config(struct pci_bus *bu return PCIBIOS_SUCCESSFUL; } -static struct pci_ops indirect_pci_ops = +static const struct pci_ops indirect_pci_ops = { .read = indirect_read_config, .write = indirect_write_config, diff -urNp linux-2.6.39.1/arch/powerpc/sysdev/ppc4xx_pci.c linux-2.6.39.1/arch/powerpc/sysdev/ppc4xx_pci.c --- linux-2.6.39.1/arch/powerpc/sysdev/ppc4xx_pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/sysdev/ppc4xx_pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -1514,7 +1514,7 @@ static int ppc4xx_pciex_write_config(str return PCIBIOS_SUCCESSFUL; } -static struct pci_ops ppc4xx_pciex_pci_ops = +static const struct pci_ops ppc4xx_pciex_pci_ops = { .read = ppc4xx_pciex_read_config, .write = ppc4xx_pciex_write_config, diff -urNp linux-2.6.39.1/arch/powerpc/sysdev/tsi108_pci.c linux-2.6.39.1/arch/powerpc/sysdev/tsi108_pci.c --- linux-2.6.39.1/arch/powerpc/sysdev/tsi108_pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/powerpc/sysdev/tsi108_pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -190,7 +190,7 @@ 
void tsi108_clear_pci_cfg_error(void) tsi108_clear_pci_error(tsi108_pci_cfg_phys); } -static struct pci_ops tsi108_direct_pci_ops = { +static const struct pci_ops tsi108_direct_pci_ops = { .read = tsi108_direct_read_config, .write = tsi108_direct_write_config, }; diff -urNp linux-2.6.39.1/arch/s390/include/asm/elf.h linux-2.6.39.1/arch/s390/include/asm/elf.h --- linux-2.6.39.1/arch/s390/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/s390/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled; the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -extern unsigned long randomize_et_dyn(unsigned long base); -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2)) +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2) + +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL) + +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 ) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 ) +#endif /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. */ @@ -222,7 +228,4 @@ struct linux_binprm; #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 int arch_setup_additional_pages(struct linux_binprm *, int); -extern unsigned long arch_randomize_brk(struct mm_struct *mm); -#define arch_randomize_brk arch_randomize_brk - #endif diff -urNp linux-2.6.39.1/arch/s390/include/asm/system.h linux-2.6.39.1/arch/s390/include/asm/system.h --- linux-2.6.39.1/arch/s390/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/s390/include/asm/system.h 2011-05-22 19:36:30.000000000 -0400 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co extern void (*_machine_halt)(void); extern void (*_machine_power_off)(void); -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) ((x) & ~0xfUL) static inline int tprot(unsigned long addr) { diff -urNp linux-2.6.39.1/arch/s390/include/asm/uaccess.h linux-2.6.39.1/arch/s390/include/asm/uaccess.h --- linux-2.6.39.1/arch/s390/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/s390/include/asm/uaccess.h 2011-05-22 19:36:30.000000000 -0400 @@ -234,6 +234,10 @@ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); + + if ((long)n < 0) + return n; + if (access_ok(VERIFY_WRITE, to, n)) n = __copy_to_user(to, from, n); return n; @@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + if (__builtin_constant_p(n) && (n <= 256)) return uaccess.copy_from_user_small(n, from, to); else @@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us unsigned int sz = __compiletime_object_size(to); might_fault(); + + if ((long)n < 0) + return n; + if (unlikely(sz != -1 && sz < n)) { copy_from_user_overflow(); return n; diff -urNp linux-2.6.39.1/arch/s390/Kconfig linux-2.6.39.1/arch/s390/Kconfig --- linux-2.6.39.1/arch/s390/Kconfig 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/s390/Kconfig 2011-05-22 19:36:30.000000000 -0400 @@ -234,11 +234,9 @@ config S390_EXEC_PROTECT prompt "Data execute protection" help This option allows to enable a buffer overflow protection for user - space programs 
and it also selects the addressing mode option above. - The kernel parameter noexec=on will enable this feature and also - switch the addressing modes, default is disabled. Enabling this (via - kernel parameter) on machines earlier than IBM System z9 this will - reduce system performance. + space programs. + Enabling this (via kernel parameter) on machines earlier than IBM + System z9 this will reduce system performance. comment "Code generation options" diff -urNp linux-2.6.39.1/arch/s390/kernel/module.c linux-2.6.39.1/arch/s390/kernel/module.c --- linux-2.6.39.1/arch/s390/kernel/module.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/s390/kernel/module.c 2011-05-22 19:36:30.000000000 -0400 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, /* Increase core size by size of got & plt and set start offsets for got and plt. */ - me->core_size = ALIGN(me->core_size, 4); - me->arch.got_offset = me->core_size; - me->core_size += me->arch.got_size; - me->arch.plt_offset = me->core_size; - me->core_size += me->arch.plt_size; + me->core_size_rw = ALIGN(me->core_size_rw, 4); + me->arch.got_offset = me->core_size_rw; + me->core_size_rw += me->arch.got_size; + me->arch.plt_offset = me->core_size_rx; + me->core_size_rx += me->arch.plt_size; return 0; } @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base if (info->got_initialized == 0) { Elf_Addr *gotent; - gotent = me->module_core + me->arch.got_offset + + gotent = me->module_core_rw + me->arch.got_offset + info->got_offset; *gotent = val; info->got_initialized = 1; @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base else if (r_type == R_390_GOTENT || r_type == R_390_GOTPLTENT) *(unsigned int *) loc = - (val + (Elf_Addr) me->module_core - loc) >> 1; + (val + (Elf_Addr) me->module_core_rw - loc) >> 1; else if (r_type == R_390_GOT64 || r_type == R_390_GOTPLT64) *(unsigned long *) loc = val; @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ if (info->plt_initialized == 0) { unsigned int *ip; - ip = me->module_core + me->arch.plt_offset + + ip = me->module_core_rx + me->arch.plt_offset + info->plt_offset; #ifndef CONFIG_64BIT ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base val - loc + 0xffffUL < 0x1ffffeUL) || (r_type == R_390_PLT32DBL && val - loc + 0xffffffffULL < 0x1fffffffeULL))) - val = (Elf_Addr) me->module_core + + val = (Elf_Addr) me->module_core_rx + me->arch.plt_offset + info->plt_offset; val += rela->r_addend - loc; @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base case R_390_GOTOFF32: /* 32 bit offset to GOT. */ case R_390_GOTOFF64: /* 64 bit offset to GOT. */ val = val + rela->r_addend - - ((Elf_Addr) me->module_core + me->arch.got_offset); + ((Elf_Addr) me->module_core_rw + me->arch.got_offset); if (r_type == R_390_GOTOFF16) *(unsigned short *) loc = val; else if (r_type == R_390_GOTOFF32) @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base break; case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. 
*/ - val = (Elf_Addr) me->module_core + me->arch.got_offset + + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset + rela->r_addend - loc; if (r_type == R_390_GOTPC) *(unsigned int *) loc = val; diff -urNp linux-2.6.39.1/arch/s390/kernel/process.c linux-2.6.39.1/arch/s390/kernel/process.c --- linux-2.6.39.1/arch/s390/kernel/process.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/s390/kernel/process.c 2011-05-22 19:36:30.000000000 -0400 @@ -334,39 +334,3 @@ unsigned long get_wchan(struct task_stru } return 0; } - -unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() & ~PAGE_MASK; - return sp & ~0xf; -} - -static inline unsigned long brk_rnd(void) -{ - /* 8MB for 32bit, 1GB for 64bit */ - if (is_32bit_task()) - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; - else - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT; -} - -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); - - if (ret < mm->brk) - return mm->brk; - return ret; -} - -unsigned long randomize_et_dyn(unsigned long base) -{ - unsigned long ret = PAGE_ALIGN(base + brk_rnd()); - - if (!(current->flags & PF_RANDOMIZE)) - return base; - if (ret < base) - return base; - return ret; -} diff -urNp linux-2.6.39.1/arch/s390/kernel/setup.c linux-2.6.39.1/arch/s390/kernel/setup.c --- linux-2.6.39.1/arch/s390/kernel/setup.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/s390/kernel/setup.c 2011-05-22 19:36:30.000000000 -0400 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char * } early_param("mem", early_parse_mem); -unsigned int user_mode = HOME_SPACE_MODE; +unsigned int user_mode = SECONDARY_SPACE_MODE; EXPORT_SYMBOL_GPL(user_mode); static int set_amode_and_uaccess(unsigned long user_amode, @@ -300,17 +300,6 @@ static int set_amode_and_uaccess(unsigne } } -/* - * Switch kernel/user addressing modes? - */ -static int __init early_parse_switch_amode(char *p) -{ - if (user_mode != SECONDARY_SPACE_MODE) - user_mode = PRIMARY_SPACE_MODE; - return 0; -} -early_param("switch_amode", early_parse_switch_amode); - static int __init early_parse_user_mode(char *p) { if (p && strcmp(p, "primary") == 0) @@ -327,20 +316,6 @@ static int __init early_parse_user_mode( } early_param("user_mode", early_parse_user_mode); -#ifdef CONFIG_S390_EXEC_PROTECT -/* - * Enable execute protection? - */ -static int __init early_parse_noexec(char *p) -{ - if (!strncmp(p, "off", 3)) - return 0; - user_mode = SECONDARY_SPACE_MODE; - return 0; -} -early_param("noexec", early_parse_noexec); -#endif /* CONFIG_S390_EXEC_PROTECT */ - static void setup_addressing_mode(void) { if (user_mode == SECONDARY_SPACE_MODE) { diff -urNp linux-2.6.39.1/arch/s390/mm/maccess.c linux-2.6.39.1/arch/s390/mm/maccess.c --- linux-2.6.39.1/arch/s390/mm/maccess.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/s390/mm/maccess.c 2011-05-22 19:36:30.000000000 -0400 @@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void return rc ? 
rc : count; } -long probe_kernel_write(void *dst, void *src, size_t size) +long probe_kernel_write(void *dst, const void *src, size_t size) { long copied = 0; diff -urNp linux-2.6.39.1/arch/s390/mm/mmap.c linux-2.6.39.1/arch/s390/mm/mmap.c --- linux-2.6.39.1/arch/s390/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/s390/mm/mmap.c 2011-05-22 19:36:30.000000000 -0400 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str */ if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str */ if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = s390_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = s390_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff -urNp linux-2.6.39.1/arch/score/include/asm/system.h linux-2.6.39.1/arch/score/include/asm/system.h --- linux-2.6.39.1/arch/score/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/score/include/asm/system.h 2011-05-22 19:36:30.000000000 -0400 @@ -17,7 +17,7 @@ do { \ #define finish_arch_switch(prev) do {} while (0) typedef void (*vi_handler_t)(void); -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) (x) #define mb() barrier() #define rmb() barrier() diff -urNp linux-2.6.39.1/arch/score/kernel/process.c linux-2.6.39.1/arch/score/kernel/process.c --- linux-2.6.39.1/arch/score/kernel/process.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/score/kernel/process.c 2011-05-22 19:36:30.000000000 -0400 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru return task_pt_regs(task)->cp0_epc; } - -unsigned long arch_align_stack(unsigned long sp) -{ - return sp; -} diff -urNp linux-2.6.39.1/arch/sh/drivers/pci/ops-dreamcast.c linux-2.6.39.1/arch/sh/drivers/pci/ops-dreamcast.c --- linux-2.6.39.1/arch/sh/drivers/pci/ops-dreamcast.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sh/drivers/pci/ops-dreamcast.c 2011-05-22 19:36:30.000000000 -0400 @@ -76,7 +76,7 @@ static int gapspci_write(struct pci_bus return PCIBIOS_SUCCESSFUL; } -struct pci_ops gapspci_pci_ops = { +const struct pci_ops gapspci_pci_ops = { .read = gapspci_read, .write = gapspci_write, }; diff -urNp linux-2.6.39.1/arch/sh/drivers/pci/ops-sh4.c linux-2.6.39.1/arch/sh/drivers/pci/ops-sh4.c --- linux-2.6.39.1/arch/sh/drivers/pci/ops-sh4.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sh/drivers/pci/ops-sh4.c 2011-05-22 19:36:30.000000000 -0400 @@ -96,7 +96,7 @@ static int sh4_pci_write(struct pci_bus return PCIBIOS_SUCCESSFUL; } -struct pci_ops sh4_pci_ops = { +const struct pci_ops sh4_pci_ops = { .read = sh4_pci_read, .write = sh4_pci_write, }; diff -urNp 
linux-2.6.39.1/arch/sh/drivers/pci/ops-sh5.c linux-2.6.39.1/arch/sh/drivers/pci/ops-sh5.c --- linux-2.6.39.1/arch/sh/drivers/pci/ops-sh5.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sh/drivers/pci/ops-sh5.c 2011-05-22 19:36:30.000000000 -0400 @@ -62,7 +62,7 @@ static int sh5pci_write(struct pci_bus * return PCIBIOS_SUCCESSFUL; } -struct pci_ops sh5_pci_ops = { +const struct pci_ops sh5_pci_ops = { .read = sh5pci_read, .write = sh5pci_write, }; diff -urNp linux-2.6.39.1/arch/sh/drivers/pci/ops-sh7786.c linux-2.6.39.1/arch/sh/drivers/pci/ops-sh7786.c --- linux-2.6.39.1/arch/sh/drivers/pci/ops-sh7786.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sh/drivers/pci/ops-sh7786.c 2011-05-22 19:36:30.000000000 -0400 @@ -165,7 +165,7 @@ out: return ret; } -struct pci_ops sh7786_pci_ops = { +const struct pci_ops sh7786_pci_ops = { .read = sh7786_pcie_read, .write = sh7786_pcie_write, }; diff -urNp linux-2.6.39.1/arch/sh/drivers/pci/pcie-sh7786.c linux-2.6.39.1/arch/sh/drivers/pci/pcie-sh7786.c --- linux-2.6.39.1/arch/sh/drivers/pci/pcie-sh7786.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sh/drivers/pci/pcie-sh7786.c 2011-05-22 19:36:30.000000000 -0400 @@ -109,7 +109,7 @@ static struct resource sh7786_pci2_resou }, }; -extern struct pci_ops sh7786_pci_ops; +extern const struct pci_ops sh7786_pci_ops; #define DEFINE_CONTROLLER(start, idx) \ { \ diff -urNp linux-2.6.39.1/arch/sh/drivers/pci/pci-sh4.h linux-2.6.39.1/arch/sh/drivers/pci/pci-sh4.h --- linux-2.6.39.1/arch/sh/drivers/pci/pci-sh4.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sh/drivers/pci/pci-sh4.h 2011-05-22 19:36:30.000000000 -0400 @@ -161,7 +161,7 @@ #define SH4_PCIPDR 0x220 /* Port IO Data Register */ /* arch/sh/kernel/drivers/pci/ops-sh4.c */ -extern struct pci_ops sh4_pci_ops; +extern const struct pci_ops sh4_pci_ops; int pci_fixup_pcic(struct pci_channel *chan); struct sh4_pci_address_space { diff -urNp linux-2.6.39.1/arch/sh/drivers/pci/pci-sh5.h linux-2.6.39.1/arch/sh/drivers/pci/pci-sh5.h --- linux-2.6.39.1/arch/sh/drivers/pci/pci-sh5.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sh/drivers/pci/pci-sh5.h 2011-05-22 19:36:30.000000000 -0400 @@ -105,6 +105,6 @@ extern unsigned long pcicr_virt; #define PCISH5_MEM_SIZCONV(x) (((x / 0x40000) - 1) << 18) #define PCISH5_IO_SIZCONV(x) (((x / 0x40000) - 1) << 18) -extern struct pci_ops sh5_pci_ops; +extern const struct pci_ops sh5_pci_ops; #endif /* __PCI_SH5_H */ diff -urNp linux-2.6.39.1/arch/sh/include/asm/dma-mapping.h linux-2.6.39.1/arch/sh/include/asm/dma-mapping.h --- linux-2.6.39.1/arch/sh/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sh/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400 @@ -1,10 +1,10 @@ #ifndef __ASM_SH_DMA_MAPPING_H #define __ASM_SH_DMA_MAPPING_H -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; extern void no_iommu_init(void); -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { return dma_ops; } @@ -14,7 +14,7 @@ static inline struct dma_map_ops *get_dm static inline int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->dma_supported) return ops->dma_supported(dev, mask); @@ -24,7 +24,7 @@ static inline int dma_supported(struct d static inline int dma_set_mask(struct device *dev, u64 mask) { - struct dma_map_ops 
*ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; @@ -44,7 +44,7 @@ void dma_cache_sync(struct device *dev, static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->mapping_error) return ops->mapping_error(dev, dma_addr); @@ -55,7 +55,7 @@ static inline int dma_mapping_error(stru static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); void *memory; if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) @@ -72,7 +72,7 @@ static inline void *dma_alloc_coherent(s static inline void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (dma_release_from_coherent(dev, get_order(size), vaddr)) return; diff -urNp linux-2.6.39.1/arch/sh/kernel/dma-nommu.c linux-2.6.39.1/arch/sh/kernel/dma-nommu.c --- linux-2.6.39.1/arch/sh/kernel/dma-nommu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sh/kernel/dma-nommu.c 2011-05-22 19:36:30.000000000 -0400 @@ -62,7 +62,7 @@ static void nommu_sync_sg(struct device } #endif -struct dma_map_ops nommu_dma_ops = { +const struct dma_map_ops nommu_dma_ops = { .alloc_coherent = dma_generic_alloc_coherent, .free_coherent = dma_generic_free_coherent, .map_page = nommu_map_page, diff -urNp linux-2.6.39.1/arch/sh/kernel/kgdb.c linux-2.6.39.1/arch/sh/kernel/kgdb.c --- linux-2.6.39.1/arch/sh/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sh/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400 @@ -319,7 +319,7 @@ void kgdb_arch_exit(void) unregister_die_notifier(&kgdb_notifier); } -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: trapa #0x3c */ #ifdef CONFIG_CPU_LITTLE_ENDIAN .gdb_bpt_instr = { 0x3c, 0xc3 }, diff -urNp linux-2.6.39.1/arch/sh/mm/consistent.c linux-2.6.39.1/arch/sh/mm/consistent.c --- linux-2.6.39.1/arch/sh/mm/consistent.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sh/mm/consistent.c 2011-05-22 19:36:30.000000000 -0400 @@ -22,7 +22,7 @@ #define PREALLOC_DMA_DEBUG_ENTRIES 4096 -struct dma_map_ops *dma_ops; +const struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); static int __init dma_init(void) diff -urNp linux-2.6.39.1/arch/sh/mm/mmap.c linux-2.6.39.1/arch/sh/mm/mmap.c --- linux-2.6.39.1/arch/sh/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sh/mm/mmap.c 2011-05-22 19:36:30.000000000 -0400 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } @@ -106,7 +105,7 @@ full_search: } return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { + if (likely(check_heap_stack_gap(vma, addr, len))) { /* * Remember the place where we stopped the search: */ @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) return 
addr; } @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr - len, len)) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr-len); } @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi if (unlikely(mm->mmap_base < len)) goto bottomup; - addr = mm->mmap_base-len; - if (do_colour_align) - addr = COLOUR_ALIGN_DOWN(addr, pgoff); + addr = mm->mmap_base - len; do { + if (do_colour_align) + addr = COLOUR_ALIGN_DOWN(addr, pgoff); /* * Lookup failure means no vma is above this address, * else if new region fits below vma->vm_start, * return with success: */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { + if (likely(check_heap_stack_gap(vma, addr, len))) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ - addr = vma->vm_start-len; - if (do_colour_align) - addr = COLOUR_ALIGN_DOWN(addr, pgoff); - } while (likely(len < vma->vm_start)); + addr = skip_heap_stack_gap(vma, len); + } while (!IS_ERR_VALUE(addr)); bottomup: /* diff -urNp linux-2.6.39.1/arch/sparc/include/asm/atomic_64.h linux-2.6.39.1/arch/sparc/include/asm/atomic_64.h --- linux-2.6.39.1/arch/sparc/include/asm/atomic_64.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/atomic_64.h 2011-05-22 19:36:30.000000000 -0400 @@ -14,18 +14,40 @@ #define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) (*(volatile int *)&(v)->counter) +static inline int atomic_read_unchecked(const atomic_unchecked_t *v) +{ + return v->counter; +} #define atomic64_read(v) (*(volatile long *)&(v)->counter) +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + return v->counter; +} #define atomic_set(v, i) (((v)->counter) = i) +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) +{ + v->counter = i; +} #define atomic64_set(v, i) (((v)->counter) = i) +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) +{ + v->counter = i; +} extern void atomic_add(int, atomic_t *); +extern void atomic_add_unchecked(int, atomic_unchecked_t *); extern void atomic64_add(long, atomic64_t *); +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *); extern void atomic_sub(int, atomic_t *); +extern void atomic_sub_unchecked(int, atomic_unchecked_t *); extern void atomic64_sub(long, atomic64_t *); +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *); extern int atomic_add_ret(int, atomic_t *); +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *); extern long atomic64_add_ret(long, atomic64_t *); +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *); extern int atomic_sub_ret(int, atomic_t *); extern long atomic64_sub_ret(long, atomic64_t *); @@ -33,12 +55,24 @@ extern long atomic64_sub_ret(long, atomi #define atomic64_dec_return(v) atomic64_sub_ret(1, v) #define atomic_inc_return(v) atomic_add_ret(1, v) +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) +{ + return atomic_add_ret_unchecked(1, v); +} #define atomic64_inc_return(v) atomic64_add_ret(1, v) +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) +{ + return atomic64_add_ret_unchecked(1, v); 
+} #define atomic_sub_return(i, v) atomic_sub_ret(i, v) #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v) #define atomic_add_return(i, v) atomic_add_ret(i, v) +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) +{ + return atomic_add_ret_unchecked(i, v); +} #define atomic64_add_return(i, v) atomic64_add_ret(i, v) /* @@ -50,6 +84,7 @@ extern long atomic64_sub_ret(long, atomi * other cases. */ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0) #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0) @@ -59,30 +94,59 @@ extern long atomic64_sub_ret(long, atomi #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0) #define atomic_inc(v) atomic_add(1, v) +static inline void atomic_inc_unchecked(atomic_unchecked_t *v) +{ + atomic_add_unchecked(1, v); +} #define atomic64_inc(v) atomic64_add(1, v) +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) +{ + atomic64_add_unchecked(1, v); +} #define atomic_dec(v) atomic_sub(1, v) +static inline void atomic_dec_unchecked(atomic_unchecked_t *v) +{ + atomic_sub_unchecked(1, v); +} #define atomic64_dec(v) atomic64_sub(1, v) +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) +{ + atomic64_sub_unchecked(1, v); +} #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new)) static inline int atomic_add_unless(atomic_t *v, int a, int u) { - int c, old; + int c, old, new; c = atomic_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic_cmpxchg((v), c, c + (a)); + + asm volatile("addcc %2, %0, %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "tvs %%icc, 6\n" +#endif + + : "=r" (new) + : "0" (c), "ir" (a) + : "cc"); + + old = atomic_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) @@ -93,17 +157,28 @@ static inline int atomic_add_unless(atom static inline long atomic64_add_unless(atomic64_t *v, long a, long u) { - long c, old; + long c, old, new; c = atomic64_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic64_cmpxchg((v), c, c + (a)); + + asm volatile("addcc %2, %0, %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "tvs %%xcc, 6\n" +#endif + + : "=r" (new) + : "0" (c), "ir" (a) + : "cc"); + + old = atomic64_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) diff -urNp linux-2.6.39.1/arch/sparc/include/asm/cache.h linux-2.6.39.1/arch/sparc/include/asm/cache.h --- linux-2.6.39.1/arch/sparc/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/cache.h 2011-05-22 19:36:30.000000000 -0400 @@ -10,7 +10,7 @@ #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) #define L1_CACHE_SHIFT 5 -#define L1_CACHE_BYTES 32 +#define L1_CACHE_BYTES 32U #ifdef CONFIG_SPARC32 #define SMP_CACHE_BYTES_SHIFT 5 diff -urNp linux-2.6.39.1/arch/sparc/include/asm/dma-mapping.h 
linux-2.6.39.1/arch/sparc/include/asm/dma-mapping.h --- linux-2.6.39.1/arch/sparc/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400 @@ -12,10 +12,10 @@ extern int dma_supported(struct device * #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -extern struct dma_map_ops *dma_ops, pci32_dma_ops; +extern const struct dma_map_ops *dma_ops, pci32_dma_ops; extern struct bus_type pci_bus_type; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) if (dev->bus == &pci_bus_type) @@ -29,7 +29,7 @@ static inline struct dma_map_ops *get_dm static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); void *cpu_addr; cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag); @@ -40,7 +40,7 @@ static inline void *dma_alloc_coherent(s static inline void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); ops->free_coherent(dev, size, cpu_addr, dma_handle); diff -urNp linux-2.6.39.1/arch/sparc/include/asm/elf_32.h linux-2.6.39.1/arch/sparc/include/asm/elf_32.h --- linux-2.6.39.1/arch/sparc/include/asm/elf_32.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/elf_32.h 2011-05-22 19:36:30.000000000 -0400 @@ -114,6 +114,13 @@ typedef struct { #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE) +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE 0x10000UL + +#define PAX_DELTA_MMAP_LEN 16 +#define PAX_DELTA_STACK_LEN 16 +#endif + /* This yields a mask that user programs can use to figure out what instruction set this cpu supports. This can NOT be done in userspace on Sparc. */ diff -urNp linux-2.6.39.1/arch/sparc/include/asm/elf_64.h linux-2.6.39.1/arch/sparc/include/asm/elf_64.h --- linux-2.6.39.1/arch/sparc/include/asm/elf_64.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/elf_64.h 2011-05-22 19:36:30.000000000 -0400 @@ -162,6 +162,12 @@ typedef struct { #define ELF_ET_DYN_BASE 0x0000010000000000UL #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL +#ifdef CONFIG_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL) + +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28) +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29) +#endif /* This yields a mask that user programs can use to figure out what instruction set this cpu supports. 
*/ diff -urNp linux-2.6.39.1/arch/sparc/include/asm/pgtable_32.h linux-2.6.39.1/arch/sparc/include/asm/pgtable_32.h --- linux-2.6.39.1/arch/sparc/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/pgtable_32.h 2011-05-22 19:36:30.000000000 -0400 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd) BTFIXUPDEF_INT(page_none) BTFIXUPDEF_INT(page_copy) BTFIXUPDEF_INT(page_readonly) + +#ifdef CONFIG_PAX_PAGEEXEC +BTFIXUPDEF_INT(page_shared_noexec) +BTFIXUPDEF_INT(page_copy_noexec) +BTFIXUPDEF_INT(page_readonly_noexec) +#endif + BTFIXUPDEF_INT(page_kernel) #define PMD_SHIFT SUN4C_PMD_SHIFT @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED; #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy)) #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly)) +#ifdef CONFIG_PAX_PAGEEXEC +extern pgprot_t PAGE_SHARED_NOEXEC; +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec)) +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec)) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + extern unsigned long page_kernel; #ifdef MODULE diff -urNp linux-2.6.39.1/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.39.1/arch/sparc/include/asm/pgtsrmmu.h --- linux-2.6.39.1/arch/sparc/include/asm/pgtsrmmu.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/pgtsrmmu.h 2011-05-22 19:36:30.000000000 -0400 @@ -115,6 +115,13 @@ SRMMU_EXEC | SRMMU_REF) #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \ SRMMU_EXEC | SRMMU_REF) + +#ifdef CONFIG_PAX_PAGEEXEC +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF) +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) +#endif + #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \ SRMMU_DIRTY | SRMMU_REF) diff -urNp linux-2.6.39.1/arch/sparc/include/asm/spinlock_64.h linux-2.6.39.1/arch/sparc/include/asm/spinlock_64.h --- linux-2.6.39.1/arch/sparc/include/asm/spinlock_64.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/spinlock_64.h 2011-05-22 19:36:30.000000000 -0400 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags( /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... 
*/ -static void inline arch_read_lock(arch_rwlock_t *lock) +static inline void arch_read_lock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; __asm__ __volatile__ ( "1: ldsw [%2], %0\n" " brlz,pn %0, 2f\n" -"4: add %0, 1, %1\n" +"4: addcc %0, 1, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" tvs %%icc, 6\n" +#endif + " cas [%2], %0, %1\n" " cmp %0, %1\n" " bne,pn %%icc, 1b\n" @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r " .previous" : "=&r" (tmp1), "=&r" (tmp2) : "r" (lock) - : "memory"); + : "memory", "cc"); } -static int inline arch_read_trylock(arch_rwlock_t *lock) +static inline int arch_read_trylock(arch_rwlock_t *lock) { int tmp1, tmp2; @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch "1: ldsw [%2], %0\n" " brlz,a,pn %0, 2f\n" " mov 0, %0\n" -" add %0, 1, %1\n" +" addcc %0, 1, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" tvs %%icc, 6\n" +#endif + " cas [%2], %0, %1\n" " cmp %0, %1\n" " bne,pn %%icc, 1b\n" @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch return tmp1; } -static void inline arch_read_unlock(arch_rwlock_t *lock) +static inline void arch_read_unlock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; __asm__ __volatile__( "1: lduw [%2], %0\n" -" sub %0, 1, %1\n" +" subcc %0, 1, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT +" tvs %%icc, 6\n" +#endif + " cas [%2], %0, %1\n" " cmp %0, %1\n" " bne,pn %%xcc, 1b\n" @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch : "memory"); } -static void inline arch_write_lock(arch_rwlock_t *lock) +static inline void arch_write_lock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2; @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_ : "memory"); } -static void inline arch_write_unlock(arch_rwlock_t *lock) +static inline void arch_write_unlock(arch_rwlock_t *lock) { __asm__ __volatile__( " stw %%g0, [%0]" @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc : "memory"); } -static int inline arch_write_trylock(arch_rwlock_t *lock) +static inline int arch_write_trylock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2, result; diff -urNp linux-2.6.39.1/arch/sparc/include/asm/thread_info_32.h linux-2.6.39.1/arch/sparc/include/asm/thread_info_32.h --- linux-2.6.39.1/arch/sparc/include/asm/thread_info_32.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/thread_info_32.h 2011-06-03 01:14:03.000000000 -0400 @@ -50,6 +50,8 @@ struct thread_info { unsigned long w_saved; struct restart_block restart_block; + + unsigned long lowest_stack; }; /* diff -urNp linux-2.6.39.1/arch/sparc/include/asm/thread_info_64.h linux-2.6.39.1/arch/sparc/include/asm/thread_info_64.h --- linux-2.6.39.1/arch/sparc/include/asm/thread_info_64.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/thread_info_64.h 2011-06-03 01:14:21.000000000 -0400 @@ -63,6 +63,8 @@ struct thread_info { struct pt_regs *kern_una_regs; unsigned int kern_una_insn; + unsigned long lowest_stack; + unsigned long fpregs[0] __attribute__ ((aligned(64))); }; diff -urNp linux-2.6.39.1/arch/sparc/include/asm/uaccess_32.h linux-2.6.39.1/arch/sparc/include/asm/uaccess_32.h --- linux-2.6.39.1/arch/sparc/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/uaccess_32.h 2011-05-22 19:36:30.000000000 -0400 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) { - if (n && __access_ok((unsigned long) to, n)) + if ((long)n < 0) + return n; + + if (n && 
__access_ok((unsigned long) to, n)) { + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); return __copy_user(to, (__force void __user *) from, n); - else + } else return n; } static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { + if ((long)n < 0) + return n; + + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); + return __copy_user(to, (__force void __user *) from, n); } static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { - if (n && __access_ok((unsigned long) from, n)) + if ((long)n < 0) + return n; + + if (n && __access_ok((unsigned long) from, n)) { + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); return __copy_user((__force void __user *) to, from, n); - else + } else return n; } static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + return __copy_user((__force void __user *) to, from, n); } diff -urNp linux-2.6.39.1/arch/sparc/include/asm/uaccess_64.h linux-2.6.39.1/arch/sparc/include/asm/uaccess_64.h --- linux-2.6.39.1/arch/sparc/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/uaccess_64.h 2011-05-22 19:36:30.000000000 -0400 @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long size) { - unsigned long ret = ___copy_from_user(to, from, size); + unsigned long ret; + if ((long)size < 0 || size > INT_MAX) + return size; + + if (!__builtin_constant_p(size)) + check_object_size(to, size, false); + + ret = ___copy_from_user(to, from, size); if (unlikely(ret)) ret = copy_from_user_fixup(to, from, size); @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup( static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long size) { - unsigned long ret = ___copy_to_user(to, from, size); + unsigned long ret; + + if ((long)size < 0 || size > INT_MAX) + return size; + + if (!__builtin_constant_p(size)) + check_object_size(from, size, true); + ret = ___copy_to_user(to, from, size); if (unlikely(ret)) ret = copy_to_user_fixup(to, from, size); return ret; diff -urNp linux-2.6.39.1/arch/sparc/include/asm/uaccess.h linux-2.6.39.1/arch/sparc/include/asm/uaccess.h --- linux-2.6.39.1/arch/sparc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/include/asm/uaccess.h 2011-05-22 19:36:30.000000000 -0400 @@ -1,5 +1,13 @@ #ifndef ___ASM_SPARC_UACCESS_H #define ___ASM_SPARC_UACCESS_H + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +#include +extern void check_object_size(const void *ptr, unsigned long n, bool to); +#endif +#endif + #if defined(__sparc__) && defined(__arch64__) #include #else diff -urNp linux-2.6.39.1/arch/sparc/kernel/iommu.c linux-2.6.39.1/arch/sparc/kernel/iommu.c --- linux-2.6.39.1/arch/sparc/kernel/iommu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/iommu.c 2011-05-22 19:36:30.000000000 -0400 @@ -824,7 +824,7 @@ static void dma_4u_sync_sg_for_cpu(struc spin_unlock_irqrestore(&iommu->lock, flags); } -static struct dma_map_ops sun4u_dma_ops = { +static const struct dma_map_ops sun4u_dma_ops = { .alloc_coherent = dma_4u_alloc_coherent, .free_coherent = dma_4u_free_coherent, .map_page = dma_4u_map_page, @@ -835,7 +835,7 @@ static 
struct dma_map_ops sun4u_dma_ops .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, }; -struct dma_map_ops *dma_ops = &sun4u_dma_ops; +const struct dma_map_ops *dma_ops = &sun4u_dma_ops; EXPORT_SYMBOL(dma_ops); extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); diff -urNp linux-2.6.39.1/arch/sparc/kernel/ioport.c linux-2.6.39.1/arch/sparc/kernel/ioport.c --- linux-2.6.39.1/arch/sparc/kernel/ioport.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/ioport.c 2011-05-22 19:36:30.000000000 -0400 @@ -402,7 +402,7 @@ static void sbus_sync_sg_for_device(stru BUG(); } -struct dma_map_ops sbus_dma_ops = { +const struct dma_map_ops sbus_dma_ops = { .alloc_coherent = sbus_alloc_coherent, .free_coherent = sbus_free_coherent, .map_page = sbus_map_page, @@ -653,7 +653,7 @@ static void pci32_sync_sg_for_device(str } } -struct dma_map_ops pci32_dma_ops = { +const struct dma_map_ops pci32_dma_ops = { .alloc_coherent = pci32_alloc_coherent, .free_coherent = pci32_free_coherent, .map_page = pci32_map_page, diff -urNp linux-2.6.39.1/arch/sparc/kernel/kgdb_32.c linux-2.6.39.1/arch/sparc/kernel/kgdb_32.c --- linux-2.6.39.1/arch/sparc/kernel/kgdb_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/kgdb_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -164,7 +164,7 @@ void kgdb_arch_set_pc(struct pt_regs *re regs->npc = regs->pc + 4; } -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: ta 0x7d */ .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d }, }; diff -urNp linux-2.6.39.1/arch/sparc/kernel/kgdb_64.c linux-2.6.39.1/arch/sparc/kernel/kgdb_64.c --- linux-2.6.39.1/arch/sparc/kernel/kgdb_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/kgdb_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -187,7 +187,7 @@ void kgdb_arch_set_pc(struct pt_regs *re regs->tnpc = regs->tpc + 4; } -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: ta 0x72 */ .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 }, }; diff -urNp linux-2.6.39.1/arch/sparc/kernel/Makefile linux-2.6.39.1/arch/sparc/kernel/Makefile --- linux-2.6.39.1/arch/sparc/kernel/Makefile 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/Makefile 2011-05-22 19:36:30.000000000 -0400 @@ -3,7 +3,7 @@ # asflags-y := -ansi -ccflags-y := -Werror +#ccflags-y := -Werror extra-y := head_$(BITS).o extra-y += init_task.o diff -urNp linux-2.6.39.1/arch/sparc/kernel/pcic.c linux-2.6.39.1/arch/sparc/kernel/pcic.c --- linux-2.6.39.1/arch/sparc/kernel/pcic.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/pcic.c 2011-05-22 19:36:30.000000000 -0400 @@ -268,7 +268,7 @@ static int pcic_write_config(struct pci_ return -EINVAL; } -static struct pci_ops pcic_ops = { +static const struct pci_ops pcic_ops = { .read = pcic_read_config, .write = pcic_write_config, }; diff -urNp linux-2.6.39.1/arch/sparc/kernel/pci_common.c linux-2.6.39.1/arch/sparc/kernel/pci_common.c --- linux-2.6.39.1/arch/sparc/kernel/pci_common.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/pci_common.c 2011-05-22 19:36:30.000000000 -0400 @@ -249,7 +249,7 @@ static int sun4u_write_pci_cfg(struct pc return PCIBIOS_SUCCESSFUL; } -struct pci_ops sun4u_pci_ops = { +const struct pci_ops sun4u_pci_ops = { .read = sun4u_read_pci_cfg, .write = sun4u_write_pci_cfg, }; @@ -310,7 +310,7 @@ static int sun4v_write_pci_cfg(struct pc return PCIBIOS_SUCCESSFUL; } -struct pci_ops sun4v_pci_ops = { 
+const struct pci_ops sun4v_pci_ops = { .read = sun4v_read_pci_cfg, .write = sun4v_write_pci_cfg, }; diff -urNp linux-2.6.39.1/arch/sparc/kernel/pci_impl.h linux-2.6.39.1/arch/sparc/kernel/pci_impl.h --- linux-2.6.39.1/arch/sparc/kernel/pci_impl.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/pci_impl.h 2011-05-22 19:36:30.000000000 -0400 @@ -175,8 +175,8 @@ extern void pci_config_write8(u8 *addr, extern void pci_config_write16(u16 *addr, u16 val); extern void pci_config_write32(u32 *addr, u32 val); -extern struct pci_ops sun4u_pci_ops; -extern struct pci_ops sun4v_pci_ops; +extern const struct pci_ops sun4u_pci_ops; +extern const struct pci_ops sun4v_pci_ops; extern volatile int pci_poke_in_progress; extern volatile int pci_poke_cpu; diff -urNp linux-2.6.39.1/arch/sparc/kernel/pci_sun4v.c linux-2.6.39.1/arch/sparc/kernel/pci_sun4v.c --- linux-2.6.39.1/arch/sparc/kernel/pci_sun4v.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/pci_sun4v.c 2011-05-22 19:36:30.000000000 -0400 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic spin_unlock_irqrestore(&iommu->lock, flags); } -static struct dma_map_ops sun4v_dma_ops = { +static const struct dma_map_ops sun4v_dma_ops = { .alloc_coherent = dma_4v_alloc_coherent, .free_coherent = dma_4v_free_coherent, .map_page = dma_4v_map_page, diff -urNp linux-2.6.39.1/arch/sparc/kernel/process_32.c linux-2.6.39.1/arch/sparc/kernel/process_32.c --- linux-2.6.39.1/arch/sparc/kernel/process_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/process_32.c 2011-05-22 19:41:32.000000000 -0400 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp) rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]); - printk("%pS\n", (void *) rw->ins[7]); + printk("%pA\n", (void *) rw->ins[7]); rw = (struct reg_window32 *) rw->ins[6]; } spin_unlock_irqrestore(&sparc_backtrace_lock, flags); @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r) printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", r->psr, r->pc, r->npc, r->y, print_tainted()); - printk("PC: <%pS>\n", (void *) r->pc); + printk("PC: <%pA>\n", (void *) r->pc); printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3], r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]); printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11], r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]); - printk("RPC: <%pS>\n", (void *) r->u_regs[15]); + printk("RPC: <%pA>\n", (void *) r->u_regs[15]); printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3], @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, rw = (struct reg_window32 *) fp; pc = rw->ins[7]; printk("[%08lx : ", pc); - printk("%pS ] ", (void *) pc); + printk("%pA ] ", (void *) pc); fp = rw->ins[6]; } while (++count < 16); printk("\n"); diff -urNp linux-2.6.39.1/arch/sparc/kernel/process_64.c linux-2.6.39.1/arch/sparc/kernel/process_64.c --- linux-2.6.39.1/arch/sparc/kernel/process_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/process_64.c 2011-05-22 19:41:32.000000000 -0400 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n", rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]); if (regs->tstate & TSTATE_PRIV) - printk("I7: <%pS>\n", (void *) rwk->ins[7]); + printk("I7: <%pA>\n", 
(void *) rwk->ins[7]); } void show_regs(struct pt_regs *regs) { printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate, regs->tpc, regs->tnpc, regs->y, print_tainted()); - printk("TPC: <%pS>\n", (void *) regs->tpc); + printk("TPC: <%pA>\n", (void *) regs->tpc); printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n", regs->u_regs[0], regs->u_regs[1], regs->u_regs[2], regs->u_regs[3]); @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs) printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n", regs->u_regs[12], regs->u_regs[13], regs->u_regs[14], regs->u_regs[15]); - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]); + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]); show_regwindow(regs); show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); } @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void ((tp && tp->task) ? tp->task->pid : -1)); if (gp->tstate & TSTATE_PRIV) { - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n", + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n", (void *) gp->tpc, (void *) gp->o7, (void *) gp->i7, diff -urNp linux-2.6.39.1/arch/sparc/kernel/sys_sparc_32.c linux-2.6.39.1/arch/sparc/kernel/sys_sparc_32.c --- linux-2.6.39.1/arch/sparc/kernel/sys_sparc_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/sys_sparc_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str if (ARCH_SUN4C && len > 0x20000000) return -ENOMEM; if (!addr) - addr = TASK_UNMAPPED_BASE; + addr = current->mm->mmap_base; if (flags & MAP_SHARED) addr = COLOUR_ALIGN(addr); @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str } if (TASK_SIZE - PAGE_SIZE - len < addr) return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) + if (check_heap_stack_gap(vmm, addr, len)) return addr; addr = vmm->vm_end; if (flags & MAP_SHARED) diff -urNp linux-2.6.39.1/arch/sparc/kernel/sys_sparc_64.c linux-2.6.39.1/arch/sparc/kernel/sys_sparc_64.c --- linux-2.6.39.1/arch/sparc/kernel/sys_sparc_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/sys_sparc_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str /* We do not accept a shared mapping if it would violate * cache aliasing constraints. 
*/ - if ((flags & MAP_SHARED) && + if ((filp || (flags & MAP_SHARED)) && ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str if (filp || (flags & MAP_SHARED)) do_color_align = 1; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } if (len > mm->cached_hole_size) { - start_addr = addr = mm->free_area_cache; + start_addr = addr = mm->free_area_cache; } else { - start_addr = addr = TASK_UNMAPPED_BASE; + start_addr = addr = mm->mmap_base; mm->cached_hole_size = 0; } @@ -174,14 +177,14 @@ full_search: vma = find_vma(mm, VA_EXCLUDE_END); } if (unlikely(task_size < addr)) { - if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = addr = TASK_UNMAPPED_BASE; + if (start_addr != mm->mmap_base) { + start_addr = addr = mm->mmap_base; mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { + if (likely(check_heap_stack_gap(vma, addr, len))) { /* * Remember the place where we stopped the search: */ @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi /* We do not accept a shared mapping if it would violate * cache aliasing constraints. */ - if ((flags & MAP_SHARED) && + if ((filp || (flags & MAP_SHARED)) && ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr - len, len)) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr-len); } @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi if (unlikely(mm->mmap_base < len)) goto bottomup; - addr = mm->mmap_base-len; - if (do_color_align) - addr = COLOUR_ALIGN_DOWN(addr, pgoff); + addr = mm->mmap_base - len; do { + if (do_color_align) + addr = COLOUR_ALIGN_DOWN(addr, pgoff); /* * Lookup failure means no vma is above this address, * else if new region fits below vma->vm_start, * return with success: */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { + if (likely(check_heap_stack_gap(vma, addr, len))) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ - addr = vma->vm_start-len; - if (do_color_align) - addr = COLOUR_ALIGN_DOWN(addr, pgoff); - } while (likely(len < vma->vm_start)); + addr = skip_heap_stack_gap(vma, len); + } while (!IS_ERR_VALUE(addr)); bottomup: /* @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str gap == RLIM_INFINITY || sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + +#ifdef 
CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str gap = (task_size / 6 * 5); mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff -urNp linux-2.6.39.1/arch/sparc/kernel/traps_32.c linux-2.6.39.1/arch/sparc/kernel/traps_32.c --- linux-2.6.39.1/arch/sparc/kernel/traps_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/traps_32.c 2011-05-22 19:41:32.000000000 -0400 @@ -76,7 +76,7 @@ void die_if_kernel(char *str, struct pt_ count++ < 30 && (((unsigned long) rw) >= PAGE_OFFSET) && !(((unsigned long) rw) & 0x7)) { - printk("Caller[%08lx]: %pS\n", rw->ins[7], + printk("Caller[%08lx]: %pA\n", rw->ins[7], (void *) rw->ins[7]); rw = (struct reg_window32 *)rw->ins[6]; } diff -urNp linux-2.6.39.1/arch/sparc/kernel/traps_64.c linux-2.6.39.1/arch/sparc/kernel/traps_64.c --- linux-2.6.39.1/arch/sparc/kernel/traps_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/traps_64.c 2011-05-22 19:41:32.000000000 -0400 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_ i + 1, p->trapstack[i].tstate, p->trapstack[i].tpc, p->trapstack[i].tnpc, p->trapstack[i].tt); - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc); + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc); } } @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl -= 0x100; if (regs->tstate & TSTATE_PRIV) { + +#ifdef CONFIG_PAX_REFCOUNT + if (lvl == 6) + pax_report_refcount_overflow(regs); +#endif + sprintf(buffer, "Kernel bad sw trap %lx", lvl); die_if_kernel(buffer, regs); } @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long void bad_trap_tl1(struct pt_regs *regs, long lvl) { char buffer[32]; - + if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, 0, lvl, SIGTRAP) == NOTIFY_STOP) return; +#ifdef CONFIG_PAX_REFCOUNT + if (lvl == 6) + pax_report_refcount_overflow(regs); +#endif + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); sprintf (buffer, "Bad trap %lx at tl>0", lvl); @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate); printk("%s" "ERROR(%d): ", (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id()); - printk("TPC<%pS>\n", (void *) regs->tpc); + printk("TPC<%pA>\n", (void *) regs->tpc); printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n", (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(), (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT, @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, smp_processor_id(), (type & 0x1) ? 'I' : 'D', regs->tpc); - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc); + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc); panic("Irrecoverable Cheetah+ parity error."); } @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, smp_processor_id(), (type & 0x1) ? 
'I' : 'D', regs->tpc); - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc); + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc); } struct sun4v_error_entry { @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc); + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc); printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]); - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n", + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n", (void *) regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] " "pte[%lx] error[%lx]\n", @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n", regs->tpc, tl); - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc); + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc); printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]); - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n", + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n", (void *) regs->u_regs[UREG_I7]); printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] " "pte[%lx] error[%lx]\n", @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, fp = (unsigned long)sf->fp + STACK_BIAS; } - printk(" [%016lx] %pS\n", pc, (void *) pc); + printk(" [%016lx] %pA\n", pc, (void *) pc); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if ((pc + 8UL) == (unsigned long) &return_to_handler) { int index = tsk->curr_ret_stack; if (tsk->ret_stack && index >= graph) { pc = tsk->ret_stack[index - graph].ret; - printk(" [%016lx] %pS\n", pc, (void *) pc); + printk(" [%016lx] %pA\n", pc, (void *) pc); graph++; } } @@ -2254,7 +2265,7 @@ void die_if_kernel(char *str, struct pt_ while (rw && count++ < 30 && kstack_valid(tp, (unsigned long) rw)) { - printk("Caller[%016lx]: %pS\n", rw->ins[7], + printk("Caller[%016lx]: %pA\n", rw->ins[7], (void *) rw->ins[7]); rw = kernel_stack_up(rw); diff -urNp linux-2.6.39.1/arch/sparc/kernel/unaligned_64.c linux-2.6.39.1/arch/sparc/kernel/unaligned_64.c --- linux-2.6.39.1/arch/sparc/kernel/unaligned_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/kernel/unaligned_64.c 2011-05-22 19:41:32.000000000 -0400 @@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); if (__ratelimit(&ratelimit)) { - printk("Kernel unaligned access at TPC[%lx] %pS\n", + printk("Kernel unaligned access at TPC[%lx] %pA\n", regs->tpc, (void *) regs->tpc); } } diff -urNp linux-2.6.39.1/arch/sparc/lib/atomic_64.S linux-2.6.39.1/arch/sparc/lib/atomic_64.S --- linux-2.6.39.1/arch/sparc/lib/atomic_64.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/lib/atomic_64.S 2011-05-22 19:36:30.000000000 -0400 @@ -18,7 +18,12 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 - add %g1, %o0, %g7 + addcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %icc, BACKOFF_LABEL(2f, 1b) @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_add, .-atomic_add + .globl atomic_add_unchecked + .type atomic_add_unchecked,#function +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: lduw [%o1], %g1 + add %g1, %o0, %g7 + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f + nop + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) 
+ .size atomic_add_unchecked, .-atomic_add_unchecked + .globl atomic_sub .type atomic_sub,#function atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 - sub %g1, %o0, %g7 + subcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %icc, BACKOFF_LABEL(2f, 1b) @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_sub, .-atomic_sub + .globl atomic_sub_unchecked + .type atomic_sub_unchecked,#function +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: lduw [%o1], %g1 + sub %g1, %o0, %g7 + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f + nop + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_sub_unchecked, .-atomic_sub_unchecked + .globl atomic_add_ret .type atomic_add_ret,#function atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 - add %g1, %o0, %g7 + addcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %icc, BACKOFF_LABEL(2f, 1b) @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic_add_ret, .-atomic_add_ret + .globl atomic_add_ret_unchecked + .type atomic_add_ret_unchecked,#function +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: lduw [%o1], %g1 + addcc %g1, %o0, %g7 + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f + add %g7, %o0, %g7 + sra %g7, 0, %o0 + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked + .globl atomic_sub_ret .type atomic_sub_ret,#function atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: lduw [%o1], %g1 - sub %g1, %o0, %g7 + subcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %icc, 6 +#endif + cas [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %icc, BACKOFF_LABEL(2f, 1b) @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 - add %g1, %o0, %g7 + addcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %xcc, 6 +#endif + casx [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, BACKOFF_LABEL(2f, 1b) @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_add, .-atomic64_add + .globl atomic64_add_unchecked + .type atomic64_add_unchecked,#function +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: ldx [%o1], %g1 + addcc %g1, %o0, %g7 + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f + nop + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_add_unchecked, .-atomic64_add_unchecked + .globl atomic64_sub .type atomic64_sub,#function atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 - sub %g1, %o0, %g7 + subcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %xcc, 6 +#endif + casx [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, BACKOFF_LABEL(2f, 1b) @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_sub, .-atomic64_sub + .globl atomic64_sub_unchecked + .type atomic64_sub_unchecked,#function +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: ldx [%o1], %g1 + subcc %g1, %o0, %g7 + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f + nop + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size 
atomic64_sub_unchecked, .-atomic64_sub_unchecked + .globl atomic64_add_ret .type atomic64_add_ret,#function atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 - add %g1, %o0, %g7 + addcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %xcc, 6 +#endif + casx [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, BACKOFF_LABEL(2f, 1b) @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o 2: BACKOFF_SPIN(%o2, %o3, 1b) .size atomic64_add_ret, .-atomic64_add_ret + .globl atomic64_add_ret_unchecked + .type atomic64_add_ret_unchecked,#function +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) +1: ldx [%o1], %g1 + addcc %g1, %o0, %g7 + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f + add %g7, %o0, %g7 + mov %g7, %o0 + retl + nop +2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked + .globl atomic64_sub_ret .type atomic64_sub_ret,#function atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ BACKOFF_SETUP(%o2) 1: ldx [%o1], %g1 - sub %g1, %o0, %g7 + subcc %g1, %o0, %g7 + +#ifdef CONFIG_PAX_REFCOUNT + tvs %xcc, 6 +#endif + casx [%o1], %g1, %g7 cmp %g1, %g7 bne,pn %xcc, BACKOFF_LABEL(2f, 1b) diff -urNp linux-2.6.39.1/arch/sparc/lib/ksyms.c linux-2.6.39.1/arch/sparc/lib/ksyms.c --- linux-2.6.39.1/arch/sparc/lib/ksyms.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/lib/ksyms.c 2011-05-22 19:36:30.000000000 -0400 @@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write); /* Atomic counter implementation. */ EXPORT_SYMBOL(atomic_add); +EXPORT_SYMBOL(atomic_add_unchecked); EXPORT_SYMBOL(atomic_add_ret); EXPORT_SYMBOL(atomic_sub); +EXPORT_SYMBOL(atomic_sub_unchecked); EXPORT_SYMBOL(atomic_sub_ret); EXPORT_SYMBOL(atomic64_add); +EXPORT_SYMBOL(atomic64_add_unchecked); EXPORT_SYMBOL(atomic64_add_ret); +EXPORT_SYMBOL(atomic64_add_ret_unchecked); EXPORT_SYMBOL(atomic64_sub); +EXPORT_SYMBOL(atomic64_sub_unchecked); EXPORT_SYMBOL(atomic64_sub_ret); /* Atomic bit operations. 
*/ diff -urNp linux-2.6.39.1/arch/sparc/lib/Makefile linux-2.6.39.1/arch/sparc/lib/Makefile --- linux-2.6.39.1/arch/sparc/lib/Makefile 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/lib/Makefile 2011-05-22 19:36:30.000000000 -0400 @@ -2,7 +2,7 @@ # asflags-y := -ansi -DST_DIV0=0x02 -ccflags-y := -Werror +#ccflags-y := -Werror lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o lib-$(CONFIG_SPARC32) += memcpy.o memset.o diff -urNp linux-2.6.39.1/arch/sparc/Makefile linux-2.6.39.1/arch/sparc/Makefile --- linux-2.6.39.1/arch/sparc/Makefile 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/Makefile 2011-05-22 19:41:32.000000000 -0400 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc # Export what is needed by arch/sparc/boot/Makefile export VMLINUX_INIT VMLINUX_MAIN VMLINUX_INIT := $(head-y) $(init-y) -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y) VMLINUX_MAIN += $(drivers-y) $(net-y) diff -urNp linux-2.6.39.1/arch/sparc/mm/fault_32.c linux-2.6.39.1/arch/sparc/mm/fault_32.c --- linux-2.6.39.1/arch/sparc/mm/fault_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/mm/fault_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -22,6 +22,9 @@ #include #include #include +#include +#include +#include #include #include @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str return safe_compute_effective_address(regs, insn); } +#ifdef CONFIG_PAX_PAGEEXEC +#ifdef CONFIG_PAX_DLRESOLVE +static void pax_emuplt_close(struct vm_area_struct *vma) +{ + vma->vm_mm->call_dl_resolve = 0UL; +} + +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + unsigned int *kaddr; + + vmf->page = alloc_page(GFP_HIGHUSER); + if (!vmf->page) + return VM_FAULT_OOM; + + kaddr = kmap(vmf->page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x9DE3BFA8U; /* save */ + flush_dcache_page(vmf->page); + kunmap(vmf->page); + return VM_FAULT_MAJOR; +} + +static const struct vm_operations_struct pax_vm_ops = { + .close = pax_emuplt_close, + .fault = pax_emuplt_fault +}; + +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) +{ + int ret; + + INIT_LIST_HEAD(&vma->anon_vma_chain); + vma->vm_mm = current->mm; + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_ops = &pax_vm_ops; + + ret = insert_vm_struct(current->mm, vma); + if (ret) + return ret; + + ++current->mm->total_vm; + return 0; +} +#endif + +/* + * PaX: decide what to do with offenders (regs->pc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: patched PLT emulation #1 */ + unsigned int sethi1, sethi2, jmpl; + + err = get_user(sethi1, (unsigned int *)regs->pc); + err |= get_user(sethi2, (unsigned int *)(regs->pc+4)); + err |= get_user(jmpl, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U) + { + unsigned int addr; + + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; + addr = 
regs->u_regs[UREG_G1]; + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } while (0); + + { /* PaX: patched PLT emulation #2 */ + unsigned int ba; + + err = get_user(ba, (unsigned int *)regs->pc); + + if (!err && (ba & 0xFFC00000U) == 0x30800000U) { + unsigned int addr; + + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } + + do { /* PaX: patched PLT emulation #3 */ + unsigned int sethi, jmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->pc); + err |= get_user(jmpl, (unsigned int *)(regs->pc+4)); + err |= get_user(nop, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U && + nop == 0x01000000U) + { + unsigned int addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 1 */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int *)regs->pc); + err |= get_user(ba, (unsigned int *)(regs->pc+4)); + err |= get_user(nop, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && + nop == 0x01000000U) + { + unsigned int addr, save, call; + + if ((ba & 0xFFC00000U) == 0x30800000U) + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); + else + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); + + err = get_user(save, (unsigned int *)addr); + err |= get_user(call, (unsigned int *)(addr+4)); + err |= get_user(nop, (unsigned int *)(addr+8)); + if (err) + break; + +#ifdef CONFIG_PAX_DLRESOLVE + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + struct vm_area_struct *vma; + unsigned long call_dl_resolve; + + down_read(&current->mm->mmap_sem); + call_dl_resolve = current->mm->call_dl_resolve; + up_read(&current->mm->mmap_sem); + if (likely(call_dl_resolve)) + goto emulate; + + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + + down_write(&current->mm->mmap_sem); + if (current->mm->call_dl_resolve) { + call_dl_resolve = current->mm->call_dl_resolve; + up_write(&current->mm->mmap_sem); + if (vma) + kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_dl_resolve & ~PAGE_MASK)) { + up_write(&current->mm->mmap_sem); + if (vma) + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + if (pax_insert_vma(vma, call_dl_resolve)) { + up_write(&current->mm->mmap_sem); + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + current->mm->call_dl_resolve = call_dl_resolve; + up_write(&current->mm->mmap_sem); + +emulate: + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->pc = call_dl_resolve; + regs->npc = addr+4; + return 3; + } +#endif + + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ + if ((save & 0xFFC00000U) == 0x05000000U && + (call & 0xFFFFE000U) == 0x85C0A000U && + nop == 0x01000000U) + { + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G2] = addr + 4; + addr = (save & 0x003FFFFFU) << 10; + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + regs->pc = addr; + regs->npc = addr+4; + 
return 3; + } + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 2 */ + unsigned int save, call, nop; + + err = get_user(save, (unsigned int *)(regs->pc-4)); + err |= get_user(call, (unsigned int *)regs->pc); + err |= get_user(nop, (unsigned int *)(regs->pc+4)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2); + + regs->u_regs[UREG_RETPC] = regs->pc; + regs->pc = dl_resolve; + regs->npc = dl_resolve+4; + return 3; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 8; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, int text_fault) { @@ -281,6 +546,24 @@ good_area: if(!(vma->vm_flags & VM_WRITE)) goto bad_area; } else { + +#ifdef CONFIG_PAX_PAGEEXEC + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) { + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + return; +#endif + + } + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]); + do_group_exit(SIGKILL); + } +#endif + /* Allow reads even for write-only mappings */ if(!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; diff -urNp linux-2.6.39.1/arch/sparc/mm/fault_64.c linux-2.6.39.1/arch/sparc/mm/fault_64.c --- linux-2.6.39.1/arch/sparc/mm/fault_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/mm/fault_64.c 2011-05-22 19:41:32.000000000 -0400 @@ -21,6 +21,9 @@ #include #include #include +#include +#include +#include #include #include @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", regs->tpc); printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]); - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]); + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]); printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); dump_stack(); unhandled_fault(regs->tpc, current, regs); @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b show_regs(regs); } +#ifdef CONFIG_PAX_PAGEEXEC +#ifdef CONFIG_PAX_DLRESOLVE +static void pax_emuplt_close(struct vm_area_struct *vma) +{ + vma->vm_mm->call_dl_resolve = 0UL; +} + +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + unsigned int *kaddr; + + vmf->page = alloc_page(GFP_HIGHUSER); + if (!vmf->page) + return VM_FAULT_OOM; + + kaddr = kmap(vmf->page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x9DE3BFA8U; /* save */ + flush_dcache_page(vmf->page); + kunmap(vmf->page); + return VM_FAULT_MAJOR; +} + +static const struct vm_operations_struct pax_vm_ops = { + .close = pax_emuplt_close, + .fault = pax_emuplt_fault +}; + +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) +{ + int ret; + + INIT_LIST_HEAD(&vma->anon_vma_chain); + vma->vm_mm = current->mm; + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_ops = &pax_vm_ops; + + ret = insert_vm_struct(current->mm, vma); + if (ret) + return ret; + 
+ ++current->mm->total_vm; + return 0; +} +#endif + +/* + * PaX: decide what to do with offenders (regs->tpc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_PAX_EMUPLT + int err; + + do { /* PaX: patched PLT emulation #1 */ + unsigned int sethi1, sethi2, jmpl; + + err = get_user(sethi1, (unsigned int *)regs->tpc); + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4)); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; + addr = regs->u_regs[UREG_G1]; + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + { /* PaX: patched PLT emulation #2 */ + unsigned int ba; + + err = get_user(ba, (unsigned int *)regs->tpc); + + if (!err && (ba & 0xFFC00000U) == 0x30800000U) { + unsigned long addr; + + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } + + do { /* PaX: patched PLT emulation #3 */ + unsigned int sethi, jmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4)); + err |= get_user(nop, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U && + nop == 0x01000000U) + { + unsigned long addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #4 */ + unsigned int sethi, mov1, call, mov2; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(mov1, (unsigned int *)(regs->tpc+4)); + err |= get_user(call, (unsigned int *)(regs->tpc+8)); + err |= get_user(mov2, (unsigned int *)(regs->tpc+12)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + mov1 == 0x8210000FU && + (call & 0xC0000000U) == 0x40000000U && + mov2 == 0x9E100001U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC]; + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #5 */ + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); + err |= get_user(or1, (unsigned int *)(regs->tpc+12)); + err |= get_user(or2, (unsigned int *)(regs->tpc+16)); + err |= get_user(sllx, (unsigned int *)(regs->tpc+20)); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24)); + err |= get_user(nop, (unsigned int *)(regs->tpc+28)); + + if (err) + 
break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x0B000000U && + (or1 & 0xFFFFE000U) == 0x82106000U && + (or2 & 0xFFFFE000U) == 0x8A116000U && + sllx == 0x83287020U && + jmpl == 0x81C04005U && + nop == 0x01000000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); + regs->u_regs[UREG_G1] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #6 */ + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); + err |= get_user(sllx, (unsigned int *)(regs->tpc+12)); + err |= get_user(or, (unsigned int *)(regs->tpc+16)); + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20)); + err |= get_user(nop, (unsigned int *)(regs->tpc+24)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x0B000000U && + sllx == 0x83287020U && + (or & 0xFFFFE000U) == 0x8A116000U && + jmpl == 0x81C04005U && + nop == 0x01000000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU); + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 1 */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(ba, (unsigned int *)(regs->tpc+4)); + err |= get_user(nop, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && + nop == 0x01000000U) + { + unsigned long addr; + unsigned int save, call; + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl; + + if ((ba & 0xFFC00000U) == 0x30800000U) + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); + else + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + err = get_user(save, (unsigned int *)addr); + err |= get_user(call, (unsigned int *)(addr+4)); + err |= get_user(nop, (unsigned int *)(addr+8)); + if (err) + break; + +#ifdef CONFIG_PAX_DLRESOLVE + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + struct vm_area_struct *vma; + unsigned long call_dl_resolve; + + down_read(&current->mm->mmap_sem); + call_dl_resolve = current->mm->call_dl_resolve; + up_read(&current->mm->mmap_sem); + if (likely(call_dl_resolve)) + goto emulate; + + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + + down_write(&current->mm->mmap_sem); + if (current->mm->call_dl_resolve) { + call_dl_resolve = current->mm->call_dl_resolve; + up_write(&current->mm->mmap_sem); + if (vma) + kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_dl_resolve & ~PAGE_MASK)) { + up_write(&current->mm->mmap_sem); + if (vma) + 
kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + if (pax_insert_vma(vma, call_dl_resolve)) { + up_write(&current->mm->mmap_sem); + kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + current->mm->call_dl_resolve = call_dl_resolve; + up_write(&current->mm->mmap_sem); + +emulate: + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->tpc = call_dl_resolve; + regs->tnpc = addr+4; + return 3; + } +#endif + + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ + if ((save & 0xFFC00000U) == 0x05000000U && + (call & 0xFFFFE000U) == 0x85C0A000U && + nop == 0x01000000U) + { + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G2] = addr + 4; + addr = (save & 0x003FFFFFU) << 10; + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 3; + } + + /* PaX: 64-bit PLT stub */ + err = get_user(sethi1, (unsigned int *)addr); + err |= get_user(sethi2, (unsigned int *)(addr+4)); + err |= get_user(or1, (unsigned int *)(addr+8)); + err |= get_user(or2, (unsigned int *)(addr+12)); + err |= get_user(sllx, (unsigned int *)(addr+16)); + err |= get_user(add, (unsigned int *)(addr+20)); + err |= get_user(jmpl, (unsigned int *)(addr+24)); + err |= get_user(nop, (unsigned int *)(addr+28)); + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x09000000U && + (sethi2 & 0xFFC00000U) == 0x0B000000U && + (or1 & 0xFFFFE000U) == 0x88112000U && + (or2 & 0xFFFFE000U) == 0x8A116000U && + sllx == 0x89293020U && + add == 0x8A010005U && + jmpl == 0x89C14000U && + nop == 0x01000000U) + { + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); + regs->u_regs[UREG_G4] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4]; + regs->u_regs[UREG_G4] = addr + 24; + addr = regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 3; + } + } + } while (0); + +#ifdef CONFIG_PAX_DLRESOLVE + do { /* PaX: unpatched PLT emulation step 2 */ + unsigned int save, call, nop; + + err = get_user(save, (unsigned int *)(regs->tpc-4)); + err |= get_user(call, (unsigned int *)regs->tpc); + err |= get_user(nop, (unsigned int *)(regs->tpc+4)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + dl_resolve &= 0xFFFFFFFFUL; + + regs->u_regs[UREG_RETPC] = regs->tpc; + regs->tpc = dl_resolve; + regs->tnpc = dl_resolve+4; + return 3; + } + } while (0); +#endif + + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int *)regs->tpc); + err |= get_user(ba, (unsigned int *)(regs->tpc+4)); + err |= get_user(nop, (unsigned int *)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (ba & 0xFFF00000U) == 0x30600000U && + nop == 0x01000000U) + { + unsigned long addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); + + if (test_thread_flag(TIF_32BIT)) + addr &= 0xFFFFFFFFUL; + + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + +#endif 
+ + return 1; +} + +void pax_report_insns(void *pc, void *sp) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 8; i++) { + unsigned int c; + if (get_user(c, (unsigned int *)pc+i)) + printk(KERN_CONT "???????? "); + else + printk(KERN_CONT "%08x ", c); + } + printk("\n"); +} +#endif + asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) { struct mm_struct *mm = current->mm; @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau if (!vma) goto bad_area; +#ifdef CONFIG_PAX_PAGEEXEC + /* PaX: detect ITLB misses on non-exec pages */ + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address && + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB)) + { + if (address != regs->tpc) + goto good_area; + + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_PAX_EMUPLT + case 2: + case 3: + return; +#endif + + } + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS)); + do_group_exit(SIGKILL); + } +#endif + /* Pure DTLB misses do not tell us whether the fault causing * load/store/atomic was a write or not, it only says that there * was no match. So in such a case we (carefully) read the diff -urNp linux-2.6.39.1/arch/sparc/mm/hugetlbpage.c linux-2.6.39.1/arch/sparc/mm/hugetlbpage.c --- linux-2.6.39.1/arch/sparc/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/mm/hugetlbpage.c 2011-05-22 19:36:30.000000000 -0400 @@ -68,7 +68,7 @@ full_search: } return -ENOMEM; } - if (likely(!vma || addr + len <= vma->vm_start)) { + if (likely(check_heap_stack_gap(vma, addr, len))) { /* * Remember the place where we stopped the search: */ @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct /* make sure it can fit in the remaining address space */ if (likely(addr > len)) { vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr - len, len)) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr-len); } @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct if (unlikely(mm->mmap_base < len)) goto bottomup; - addr = (mm->mmap_base-len) & HPAGE_MASK; + addr = mm->mmap_base - len; do { + addr &= HPAGE_MASK; /* * Lookup failure means no vma is above this address, * else if new region fits below vma->vm_start, * return with success: */ vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { + if (likely(check_heap_stack_gap(vma, addr, len))) { /* remember the address as a hint for next time */ return (mm->free_area_cache = addr); } @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ - addr = (vma->vm_start-len) & HPAGE_MASK; - } while (likely(len < vma->vm_start)); + addr = skip_heap_stack_gap(vma, len); + } while (!IS_ERR_VALUE(addr)); bottomup: /* @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f if (addr) { addr = ALIGN(addr, HPAGE_SIZE); vma = find_vma(mm, addr); - if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff -urNp linux-2.6.39.1/arch/sparc/mm/init_32.c linux-2.6.39.1/arch/sparc/mm/init_32.c --- linux-2.6.39.1/arch/sparc/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/mm/init_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -318,6 
+318,9 @@ extern void device_scan(void); pgprot_t PAGE_SHARED __read_mostly; EXPORT_SYMBOL(PAGE_SHARED); +pgprot_t PAGE_SHARED_NOEXEC __read_mostly; +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC); + void __init paging_init(void) { switch(sparc_cpu_model) { @@ -346,17 +349,17 @@ void __init paging_init(void) /* Initialize the protection map with non-constant, MMU dependent values. */ protection_map[0] = PAGE_NONE; - protection_map[1] = PAGE_READONLY; - protection_map[2] = PAGE_COPY; - protection_map[3] = PAGE_COPY; + protection_map[1] = PAGE_READONLY_NOEXEC; + protection_map[2] = PAGE_COPY_NOEXEC; + protection_map[3] = PAGE_COPY_NOEXEC; protection_map[4] = PAGE_READONLY; protection_map[5] = PAGE_READONLY; protection_map[6] = PAGE_COPY; protection_map[7] = PAGE_COPY; protection_map[8] = PAGE_NONE; - protection_map[9] = PAGE_READONLY; - protection_map[10] = PAGE_SHARED; - protection_map[11] = PAGE_SHARED; + protection_map[9] = PAGE_READONLY_NOEXEC; + protection_map[10] = PAGE_SHARED_NOEXEC; + protection_map[11] = PAGE_SHARED_NOEXEC; protection_map[12] = PAGE_READONLY; protection_map[13] = PAGE_READONLY; protection_map[14] = PAGE_SHARED; diff -urNp linux-2.6.39.1/arch/sparc/mm/Makefile linux-2.6.39.1/arch/sparc/mm/Makefile --- linux-2.6.39.1/arch/sparc/mm/Makefile 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/mm/Makefile 2011-05-22 19:36:30.000000000 -0400 @@ -2,7 +2,7 @@ # asflags-y := -ansi -ccflags-y := -Werror +#ccflags-y := -Werror obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o obj-y += fault_$(BITS).o diff -urNp linux-2.6.39.1/arch/sparc/mm/srmmu.c linux-2.6.39.1/arch/sparc/mm/srmmu.c --- linux-2.6.39.1/arch/sparc/mm/srmmu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/sparc/mm/srmmu.c 2011-05-22 19:36:30.000000000 -0400 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void) PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED); BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); + +#ifdef CONFIG_PAX_PAGEEXEC + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC); + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC)); + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC)); +#endif + BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); diff -urNp linux-2.6.39.1/arch/tile/kernel/pci.c linux-2.6.39.1/arch/tile/kernel/pci.c --- linux-2.6.39.1/arch/tile/kernel/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/tile/kernel/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -60,7 +60,7 @@ int __write_once tile_plx_gen1; static struct pci_controller controllers[TILE_NUM_PCIE]; static int num_controllers; -static struct pci_ops tile_cfg_ops; +static const struct pci_ops tile_cfg_ops; /* @@ -564,7 +564,7 @@ static int __devinit tile_cfg_write(stru } -static struct pci_ops tile_cfg_ops = { +static const struct pci_ops tile_cfg_ops = { .read = tile_cfg_read, .write = tile_cfg_write, }; diff -urNp linux-2.6.39.1/arch/um/include/asm/kmap_types.h linux-2.6.39.1/arch/um/include/asm/kmap_types.h --- linux-2.6.39.1/arch/um/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/um/include/asm/kmap_types.h 2011-05-22 19:36:30.000000000 -0400 @@ -23,6 +23,7 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_CLEARPAGE, KM_TYPE_NR }; diff -urNp linux-2.6.39.1/arch/um/include/asm/page.h linux-2.6.39.1/arch/um/include/asm/page.h --- linux-2.6.39.1/arch/um/include/asm/page.h 2011-05-19 
00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/um/include/asm/page.h 2011-05-22 19:36:30.000000000 -0400 @@ -14,6 +14,9 @@ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) + #ifndef __ASSEMBLY__ struct page; diff -urNp linux-2.6.39.1/arch/um/kernel/process.c linux-2.6.39.1/arch/um/kernel/process.c --- linux-2.6.39.1/arch/um/kernel/process.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/um/kernel/process.c 2011-05-22 19:36:30.000000000 -0400 @@ -404,22 +404,6 @@ int singlestepping(void * t) return 2; } -/* - * Only x86 and x86_64 have an arch_align_stack(). - * All other arches have "#define arch_align_stack(x) (x)" - * in their asm/system.h - * As this is included in UML from asm-um/system-generic.h, - * we can use it to behave as the subarch does. - */ -#ifndef arch_align_stack -unsigned long arch_align_stack(unsigned long sp) -{ - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() % 8192; - return sp & ~0xf; -} -#endif - unsigned long get_wchan(struct task_struct *p) { unsigned long stack_page, sp, ip; diff -urNp linux-2.6.39.1/arch/um/sys-i386/syscalls.c linux-2.6.39.1/arch/um/sys-i386/syscalls.c --- linux-2.6.39.1/arch/um/sys-i386/syscalls.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/um/sys-i386/syscalls.c 2011-05-22 19:36:30.000000000 -0400 @@ -11,6 +11,21 @@ #include "asm/uaccess.h" #include "asm/unistd.h" +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) +{ + unsigned long pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + if (len > pax_task_size || addr > pax_task_size - len) + return -EINVAL; + + return 0; +} + /* * The prototype on i386 is: * diff -urNp linux-2.6.39.1/arch/unicore32/kernel/pci.c linux-2.6.39.1/arch/unicore32/kernel/pci.c --- linux-2.6.39.1/arch/unicore32/kernel/pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/unicore32/kernel/pci.c 2011-05-22 19:36:30.000000000 -0400 @@ -66,7 +66,7 @@ puv3_write_config(struct pci_bus *bus, u return PCIBIOS_SUCCESSFUL; } -struct pci_ops pci_puv3_ops = { +const struct pci_ops pci_puv3_ops = { .read = puv3_read_config, .write = puv3_write_config, }; diff -urNp linux-2.6.39.1/arch/x86/boot/bitops.h linux-2.6.39.1/arch/x86/boot/bitops.h --- linux-2.6.39.1/arch/x86/boot/bitops.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/boot/bitops.h 2011-05-22 19:36:30.000000000 -0400 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int u8 v; const u32 *p = (const u32 *)addr; - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); return v; } @@ -37,7 +37,7 @@ static inline int variable_test_bit(int static inline void set_bit(int nr, void *addr) { - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); } #endif /* BOOT_BITOPS_H */ diff -urNp linux-2.6.39.1/arch/x86/boot/boot.h linux-2.6.39.1/arch/x86/boot/boot.h --- linux-2.6.39.1/arch/x86/boot/boot.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/boot/boot.h 2011-05-22 19:36:30.000000000 -0400 @@ -85,7 +85,7 @@ static inline void io_delay(void) static inline u16 ds(void) { u16 seg; - asm("movw %%ds,%0" : "=rm" (seg)); + asm volatile("movw %%ds,%0" : "=rm" (seg)); return seg; } @@ -181,7 +181,7 @@ 
static inline void wrgs32(u32 v, addr_t static inline int memcmp(const void *s1, const void *s2, size_t len) { u8 diff; - asm("repe; cmpsb; setnz %0" + asm volatile("repe; cmpsb; setnz %0" : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len)); return diff; } diff -urNp linux-2.6.39.1/arch/x86/boot/compressed/head_32.S linux-2.6.39.1/arch/x86/boot/compressed/head_32.S --- linux-2.6.39.1/arch/x86/boot/compressed/head_32.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/boot/compressed/head_32.S 2011-05-22 19:36:30.000000000 -0400 @@ -76,7 +76,7 @@ ENTRY(startup_32) notl %eax andl %eax, %ebx #else - movl $LOAD_PHYSICAL_ADDR, %ebx + movl $____LOAD_PHYSICAL_ADDR, %ebx #endif /* Target address to relocate to for decompression */ @@ -162,7 +162,7 @@ relocated: * and where it was actually loaded. */ movl %ebp, %ebx - subl $LOAD_PHYSICAL_ADDR, %ebx + subl $____LOAD_PHYSICAL_ADDR, %ebx jz 2f /* Nothing to be done if loaded at compiled addr. */ /* * Process relocations. @@ -170,8 +170,7 @@ relocated: 1: subl $4, %edi movl (%edi), %ecx - testl %ecx, %ecx - jz 2f + jecxz 2f addl %ebx, -__PAGE_OFFSET(%ebx, %ecx) jmp 1b 2: diff -urNp linux-2.6.39.1/arch/x86/boot/compressed/head_64.S linux-2.6.39.1/arch/x86/boot/compressed/head_64.S --- linux-2.6.39.1/arch/x86/boot/compressed/head_64.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/boot/compressed/head_64.S 2011-05-22 19:36:30.000000000 -0400 @@ -91,7 +91,7 @@ ENTRY(startup_32) notl %eax andl %eax, %ebx #else - movl $LOAD_PHYSICAL_ADDR, %ebx + movl $____LOAD_PHYSICAL_ADDR, %ebx #endif /* Target address to relocate to for decompression */ @@ -233,7 +233,7 @@ ENTRY(startup_64) notq %rax andq %rax, %rbp #else - movq $LOAD_PHYSICAL_ADDR, %rbp + movq $____LOAD_PHYSICAL_ADDR, %rbp #endif /* Target address to relocate to for decompression */ diff -urNp linux-2.6.39.1/arch/x86/boot/compressed/misc.c linux-2.6.39.1/arch/x86/boot/compressed/misc.c --- linux-2.6.39.1/arch/x86/boot/compressed/misc.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/boot/compressed/misc.c 2011-05-22 19:36:30.000000000 -0400 @@ -310,7 +310,7 @@ static void parse_elf(void *output) case PT_LOAD: #ifdef CONFIG_RELOCATABLE dest = output; - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR); #else dest = (void *)(phdr->p_paddr); #endif @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void * error("Destination address too large"); #endif #ifndef CONFIG_RELOCATABLE - if ((unsigned long)output != LOAD_PHYSICAL_ADDR) + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR) error("Wrong destination address"); #endif diff -urNp linux-2.6.39.1/arch/x86/boot/compressed/relocs.c linux-2.6.39.1/arch/x86/boot/compressed/relocs.c --- linux-2.6.39.1/arch/x86/boot/compressed/relocs.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/boot/compressed/relocs.c 2011-05-22 19:36:30.000000000 -0400 @@ -13,8 +13,11 @@ static void die(char *fmt, ...); +#include "../../../../include/generated/autoconf.h" + #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) static Elf32_Ehdr ehdr; +static Elf32_Phdr *phdr; static unsigned long reloc_count, reloc_idx; static unsigned long *relocs; @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp) } } +static void read_phdrs(FILE *fp) +{ + unsigned int i; + + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr)); + if (!phdr) { + die("Unable to allocate %d program headers\n", + ehdr.e_phnum); + } + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) { + die("Seek to %d failed: %s\n", + 
ehdr.e_phoff, strerror(errno)); + } + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) { + die("Cannot read ELF program headers: %s\n", + strerror(errno)); + } + for(i = 0; i < ehdr.e_phnum; i++) { + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type); + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset); + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr); + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr); + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz); + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz); + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags); + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align); + } + +} + static void read_shdrs(FILE *fp) { - int i; + unsigned int i; Elf32_Shdr shdr; secs = calloc(ehdr.e_shnum, sizeof(struct section)); @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp) static void read_strtabs(FILE *fp) { - int i; + unsigned int i; for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_STRTAB) { @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp) static void read_symtabs(FILE *fp) { - int i,j; + unsigned int i,j; for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_SYMTAB) { @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp) static void read_relocs(FILE *fp) { - int i,j; + unsigned int i,j; + uint32_t base; + for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_REL) { @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp) die("Cannot read symbol table: %s\n", strerror(errno)); } + base = 0; + for (j = 0; j < ehdr.e_phnum; j++) { + if (phdr[j].p_type != PT_LOAD ) + continue; + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz) + continue; + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr; + break; + } for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { Elf32_Rel *rel = &sec->reltab[j]; - rel->r_offset = elf32_to_cpu(rel->r_offset); + rel->r_offset = elf32_to_cpu(rel->r_offset) + base; rel->r_info = elf32_to_cpu(rel->r_info); } } @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp) static void print_absolute_symbols(void) { - int i; + unsigned int i; printf("Absolute symbols\n"); printf(" Num: Value Size Type Bind Visibility Name\n"); for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; char *sym_strtab; Elf32_Sym *sh_symtab; - int j; + unsigned int j; if (sec->shdr.sh_type != SHT_SYMTAB) { continue; @@ -431,14 +475,14 @@ static void print_absolute_symbols(void) static void print_absolute_relocs(void) { - int i, printed = 0; + unsigned int i, printed = 0; for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; struct section *sec_applies, *sec_symtab; char *sym_strtab; Elf32_Sym *sh_symtab; - int j; + unsigned int j; if (sec->shdr.sh_type != SHT_REL) { continue; } @@ -499,13 +543,13 @@ static void print_absolute_relocs(void) static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) { - int i; + unsigned int i; /* Walk through the relocations */ for (i = 0; i < ehdr.e_shnum; i++) { char *sym_strtab; Elf32_Sym *sh_symtab; struct section *sec_applies, *sec_symtab; - int j; + unsigned int j; struct section *sec = &secs[i]; if (sec->shdr.sh_type != SHT_REL) { @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El !is_rel_reloc(sym_name(sym_strtab, sym))) { continue; } + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses 
*/ + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load")) + continue; + +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32) + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */ + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext")) + continue; + if (!strcmp(sec_name(sym->st_shndx), ".init.text")) + continue; + if (!strcmp(sec_name(sym->st_shndx), ".exit.text")) + continue; + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR")) + continue; +#endif + switch (r_type) { case R_386_NONE: case R_386_PC32: @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co static void emit_relocs(int as_text) { - int i; + unsigned int i; /* Count how many relocations I have and allocate space for them. */ reloc_count = 0; walk_relocs(count_reloc); @@ -665,6 +725,7 @@ int main(int argc, char **argv) fname, strerror(errno)); } read_ehdr(fp); + read_phdrs(fp); read_shdrs(fp); read_strtabs(fp); read_symtabs(fp); diff -urNp linux-2.6.39.1/arch/x86/boot/cpucheck.c linux-2.6.39.1/arch/x86/boot/cpucheck.c --- linux-2.6.39.1/arch/x86/boot/cpucheck.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/boot/cpucheck.c 2011-05-22 19:36:30.000000000 -0400 @@ -74,7 +74,7 @@ static int has_fpu(void) u16 fcw = -1, fsw = -1; u32 cr0; - asm("movl %%cr0,%0" : "=r" (cr0)); + asm volatile("movl %%cr0,%0" : "=r" (cr0)); if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { cr0 &= ~(X86_CR0_EM|X86_CR0_TS); asm volatile("movl %0,%%cr0" : : "r" (cr0)); @@ -90,7 +90,7 @@ static int has_eflag(u32 mask) { u32 f0, f1; - asm("pushfl ; " + asm volatile("pushfl ; " "pushfl ; " "popl %0 ; " "movl %0,%1 ; " @@ -115,7 +115,7 @@ static void get_flags(void) set_bit(X86_FEATURE_FPU, cpu.flags); if (has_eflag(X86_EFLAGS_ID)) { - asm("cpuid" + asm volatile("cpuid" : "=a" (max_intel_level), "=b" (cpu_vendor[0]), "=d" (cpu_vendor[1]), @@ -124,7 +124,7 @@ static void get_flags(void) if (max_intel_level >= 0x00000001 && max_intel_level <= 0x0000ffff) { - asm("cpuid" + asm volatile("cpuid" : "=a" (tfms), "=c" (cpu.flags[4]), "=d" (cpu.flags[0]) @@ -136,7 +136,7 @@ static void get_flags(void) cpu.model += ((tfms >> 16) & 0xf) << 4; } - asm("cpuid" + asm volatile("cpuid" : "=a" (max_amd_level) : "a" (0x80000000) : "ebx", "ecx", "edx"); @@ -144,7 +144,7 @@ static void get_flags(void) if (max_amd_level >= 0x80000001 && max_amd_level <= 0x8000ffff) { u32 eax = 0x80000001; - asm("cpuid" + asm volatile("cpuid" : "+a" (eax), "=c" (cpu.flags[6]), "=d" (cpu.flags[1]) @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r u32 ecx = MSR_K7_HWCR; u32 eax, edx; - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); eax &= ~(1 << 15); - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); get_flags(); /* Make sure it really did something */ err = check_flags(); @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r u32 ecx = MSR_VIA_FCR; u32 eax, edx; - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); eax |= (1<<1)|(1<<7); - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); set_bit(X86_FEATURE_CX8, cpu.flags); err = check_flags(); @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r u32 eax, edx; 
u32 level = 1; - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); - asm("cpuid" + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); + asm volatile("cpuid" : "+a" (level), "=d" (cpu.flags[0]) : : "ecx", "ebx"); - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); err = check_flags(); } diff -urNp linux-2.6.39.1/arch/x86/boot/header.S linux-2.6.39.1/arch/x86/boot/header.S --- linux-2.6.39.1/arch/x86/boot/header.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/boot/header.S 2011-05-22 19:36:30.000000000 -0400 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical # single linked list of # struct setup_data -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset) #define VO_INIT_SIZE (VO__end - VO__text) diff -urNp linux-2.6.39.1/arch/x86/boot/memory.c linux-2.6.39.1/arch/x86/boot/memory.c --- linux-2.6.39.1/arch/x86/boot/memory.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/boot/memory.c 2011-05-22 19:36:30.000000000 -0400 @@ -19,7 +19,7 @@ static int detect_memory_e820(void) { - int count = 0; + unsigned int count = 0; struct biosregs ireg, oreg; struct e820entry *desc = boot_params.e820_map; static struct e820entry buf; /* static so it is zeroed */ diff -urNp linux-2.6.39.1/arch/x86/boot/video.c linux-2.6.39.1/arch/x86/boot/video.c --- linux-2.6.39.1/arch/x86/boot/video.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/boot/video.c 2011-05-22 19:36:30.000000000 -0400 @@ -96,7 +96,7 @@ static void store_mode_params(void) static unsigned int get_entry(void) { char entry_buf[4]; - int i, len = 0; + unsigned int i, len = 0; int key; unsigned int v; diff -urNp linux-2.6.39.1/arch/x86/boot/video-vesa.c linux-2.6.39.1/arch/x86/boot/video-vesa.c --- linux-2.6.39.1/arch/x86/boot/video-vesa.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/boot/video-vesa.c 2011-05-22 19:36:30.000000000 -0400 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void) boot_params.screen_info.vesapm_seg = oreg.es; boot_params.screen_info.vesapm_off = oreg.di; + boot_params.screen_info.vesapm_size = oreg.cx; } /* diff -urNp linux-2.6.39.1/arch/x86/ia32/ia32_aout.c linux-2.6.39.1/arch/x86/ia32/ia32_aout.c --- linux-2.6.39.1/arch/x86/ia32/ia32_aout.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/ia32/ia32_aout.c 2011-05-22 19:41:32.000000000 -0400 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st unsigned long dump_start, dump_size; struct user32 dump; + memset(&dump, 0, sizeof(dump)); + fs = get_fs(); set_fs(KERNEL_DS); has_dumped = 1; diff -urNp linux-2.6.39.1/arch/x86/ia32/ia32entry.S linux-2.6.39.1/arch/x86/ia32/ia32entry.S --- linux-2.6.39.1/arch/x86/ia32/ia32entry.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/ia32/ia32entry.S 2011-05-23 17:16:01.000000000 -0400 @@ -13,6 +13,7 @@ #include #include #include +#include #include /* Avoid __ASSEMBLER__'ifying just for this. 
*/ @@ -95,6 +96,32 @@ ENTRY(native_irq_enable_sysexit) ENDPROC(native_irq_enable_sysexit) #endif + .macro pax_enter_kernel_user +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_enter_kernel_user +#endif + .endm + + .macro pax_exit_kernel_user +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_exit_kernel_user +#endif +#ifdef CONFIG_PAX_RANDKSTACK + pushq %rax + call pax_randomize_kstack + popq %rax +#endif +#ifdef CONFIG_PAX_MEMORY_STACKLEAK + call pax_erase_kstack +#endif + .endm + + .macro pax_erase_kstack +#ifdef CONFIG_PAX_MEMORY_STACKLEAK + call pax_erase_kstack +#endif + .endm + /* * 32bit SYSENTER instruction entry. * @@ -121,7 +148,7 @@ ENTRY(ia32_sysenter_target) CFI_REGISTER rsp,rbp SWAPGS_UNSAFE_STACK movq PER_CPU_VAR(kernel_stack), %rsp - addq $(KERNEL_STACK_OFFSET),%rsp + pax_enter_kernel_user /* * No need to follow this irqs on/off section: the syscall * disabled irqs, here we enable it straight after entry: @@ -134,7 +161,8 @@ ENTRY(ia32_sysenter_target) CFI_REL_OFFSET rsp,0 pushfq_cfi /*CFI_REL_OFFSET rflags,0*/ - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d + GET_THREAD_INFO(%r10) + movl TI_sysenter_return(%r10), %r10d CFI_REGISTER rip,r10 pushq_cfi $__USER32_CS /*CFI_REL_OFFSET cs,0*/ @@ -146,6 +174,12 @@ ENTRY(ia32_sysenter_target) SAVE_ARGS 0,0,1 /* no need to do an access_ok check here because rbp has been 32bit zero extended */ + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov $PAX_USER_SHADOW_BASE,%r10 + add %r10,%rbp +#endif + 1: movl (%rbp),%ebp .section __ex_table,"a" .quad 1b,ia32_badarg @@ -168,6 +202,7 @@ sysenter_dispatch: testl $_TIF_ALLWORK_MASK,TI_flags(%r10) jnz sysexit_audit sysexit_from_sys_call: + pax_exit_kernel_user andl $~TS_COMPAT,TI_status(%r10) /* clear IF, that popfq doesn't enable interrupts early */ andl $~0x200,EFLAGS-R11(%rsp) @@ -194,6 +229,9 @@ sysexit_from_sys_call: movl %eax,%esi /* 2nd arg: syscall number */ movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ call audit_syscall_entry + + pax_erase_kstack + movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ cmpq $(IA32_NR_syscalls-1),%rax ja ia32_badsys @@ -246,6 +284,9 @@ sysenter_tracesys: movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */ movq %rsp,%rdi /* &pt_regs -> arg1 */ call syscall_trace_enter + + pax_erase_kstack + LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ RESTORE_REST cmpq $(IA32_NR_syscalls-1),%rax @@ -277,19 +318,24 @@ ENDPROC(ia32_sysenter_target) ENTRY(ia32_cstar_target) CFI_STARTPROC32 simple CFI_SIGNAL_FRAME - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET + CFI_DEF_CFA rsp,0 CFI_REGISTER rip,rcx /*CFI_REGISTER rflags,r11*/ SWAPGS_UNSAFE_STACK movl %esp,%r8d CFI_REGISTER rsp,r8 movq PER_CPU_VAR(kernel_stack),%rsp + +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_enter_kernel_user +#endif + /* * No need to follow this irqs on/off section: the syscall * disabled irqs and here we enable it straight after entry: */ ENABLE_INTERRUPTS(CLBR_NONE) - SAVE_ARGS 8,1,1 + SAVE_ARGS 8*6,1,1 movl %eax,%eax /* zero extension */ movq %rax,ORIG_RAX-ARGOFFSET(%rsp) movq %rcx,RIP-ARGOFFSET(%rsp) @@ -305,6 +351,12 @@ ENTRY(ia32_cstar_target) /* no need to do an access_ok check here because r8 has been 32bit zero extended */ /* hardware stack frame is complete now */ + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov $PAX_USER_SHADOW_BASE,%r10 + add %r10,%r8 +#endif + 1: movl (%r8),%r9d .section __ex_table,"a" .quad 1b,ia32_badarg @@ -327,6 +379,7 @@ cstar_dispatch: testl $_TIF_ALLWORK_MASK,TI_flags(%r10) jnz sysretl_audit sysretl_from_sys_call: + pax_exit_kernel_user andl 
$~TS_COMPAT,TI_status(%r10) RESTORE_ARGS 1,-ARG_SKIP,1,1,1 movl RIP-ARGOFFSET(%rsp),%ecx @@ -364,6 +417,9 @@ cstar_tracesys: movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ movq %rsp,%rdi /* &pt_regs -> arg1 */ call syscall_trace_enter + + pax_erase_kstack + LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ RESTORE_REST xchgl %ebp,%r9d @@ -409,6 +465,7 @@ ENTRY(ia32_syscall) CFI_REL_OFFSET rip,RIP-RIP PARAVIRT_ADJUST_EXCEPTION_FRAME SWAPGS + pax_enter_kernel_user /* * No need to follow this irqs on/off section: the syscall * disabled irqs and here we enable it straight after entry: @@ -441,6 +498,9 @@ ia32_tracesys: movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ movq %rsp,%rdi /* &pt_regs -> arg1 */ call syscall_trace_enter + + pax_erase_kstack + LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ RESTORE_REST cmpq $(IA32_NR_syscalls-1),%rax diff -urNp linux-2.6.39.1/arch/x86/ia32/ia32_signal.c linux-2.6.39.1/arch/x86/ia32/ia32_signal.c --- linux-2.6.39.1/arch/x86/ia32/ia32_signal.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/ia32/ia32_signal.c 2011-05-22 19:36:30.000000000 -0400 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct sp -= frame_size; /* Align the stack pointer according to the i386 ABI, * i.e. so that on function entry ((sp + 4) & 15) == 0. */ - sp = ((sp + 4) & -16ul) - 4; + sp = ((sp - 12) & -16ul) - 4; return (void __user *) sp; } @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s * These are actually not used anymore, but left because some * gdb versions depend on them as a marker. */ - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode); + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode); } put_user_catch(err); if (err) @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct 0xb8, __NR_ia32_rt_sigreturn, 0x80cd, - 0, + 0 }; frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; + else if (current->mm->context.vdso) + /* Return stub is in 32bit vsyscall page */ + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); else - restorer = VDSO32_SYMBOL(current->mm->context.vdso, - rt_sigreturn); + restorer = &frame->retcode; put_user_ex(ptr_to_compat(restorer), &frame->pretcode); /* * Not actually used anymore, but left because some gdb * versions need it. 
*/ - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode); + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode); } put_user_catch(err); if (err) diff -urNp linux-2.6.39.1/arch/x86/include/asm/alternative.h linux-2.6.39.1/arch/x86/include/asm/alternative.h --- linux-2.6.39.1/arch/x86/include/asm/alternative.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/alternative.h 2011-05-22 19:36:30.000000000 -0400 @@ -94,7 +94,7 @@ static inline int alternatives_text_rese ".section .discard,\"aw\",@progbits\n" \ " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \ ".previous\n" \ - ".section .altinstr_replacement, \"ax\"\n" \ + ".section .altinstr_replacement, \"a\"\n" \ "663:\n\t" newinstr "\n664:\n" /* replacement */ \ ".previous" diff -urNp linux-2.6.39.1/arch/x86/include/asm/apm.h linux-2.6.39.1/arch/x86/include/asm/apm.h --- linux-2.6.39.1/arch/x86/include/asm/apm.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/apm.h 2011-05-22 19:36:30.000000000 -0400 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall *%%cs:apm_bios_entry\n\t" + "lcall *%%ss:apm_bios_entry\n\t" "setc %%al\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall *%%cs:apm_bios_entry\n\t" + "lcall *%%ss:apm_bios_entry\n\t" "setc %%bl\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" diff -urNp linux-2.6.39.1/arch/x86/include/asm/atomic64_32.h linux-2.6.39.1/arch/x86/include/asm/atomic64_32.h --- linux-2.6.39.1/arch/x86/include/asm/atomic64_32.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/atomic64_32.h 2011-05-22 19:36:30.000000000 -0400 @@ -12,6 +12,14 @@ typedef struct { u64 __aligned(8) counter; } atomic64_t; +#ifdef CONFIG_PAX_REFCOUNT +typedef struct { + u64 __aligned(8) counter; +} atomic64_unchecked_t; +#else +typedef atomic64_t atomic64_unchecked_t; +#endif + #define ATOMIC64_INIT(val) { (val) } #ifdef CONFIG_X86_CMPXCHG64 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg } /** + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable + * @p: pointer to type atomic64_unchecked_t + * @o: expected value + * @n: new value + * + * Atomically sets @v to @n if it was equal to @o and returns + * the old value. + */ + +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n) +{ + return cmpxchg64(&v->counter, o, n); +} + +/** * atomic64_xchg - xchg atomic64 variable * @v: pointer to type atomic64_t * @n: value to assign @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64 } /** + * atomic64_set_unchecked - set atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * @n: value to assign + * + * Atomically sets the value of @v to @n. + */ +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) +{ + unsigned high = (unsigned)(i >> 32); + unsigned low = (unsigned)i; + asm volatile(ATOMIC64_ALTERNATIVE(set) + : "+b" (low), "+c" (high) + : "S" (v) + : "eax", "edx", "memory" + ); +} + +/** * atomic64_read - read atomic64 variable * @v: pointer to type atomic64_t * @@ -93,6 +134,22 @@ static inline long long atomic64_read(at } /** + * atomic64_read_unchecked - read atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * + * Atomically reads the value of @v and returns it. 
+ */ +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v) +{ + long long r; + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked) + : "=A" (r), "+c" (v) + : : "memory" + ); + return r; + } + +/** * atomic64_add_return - add and return * @i: integer value to add * @v: pointer to type atomic64_t @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret return i; } +/** + * atomic64_add_return_unchecked - add and return + * @i: integer value to add + * @v: pointer to type atomic64_unchecked_t + * + * Atomically adds @i to @v and returns @i + *@v + */ +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v) +{ + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked) + : "+A" (i), "+c" (v) + : : "memory" + ); + return i; +} + /* * Other variants with different arithmetic operators: */ @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret return a; } +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) +{ + long long a; + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked) + : "=A" (a) + : "S" (v) + : "memory", "ecx" + ); + return a; +} + static inline long long atomic64_dec_return(atomic64_t *v) { long long a; @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon } /** + * atomic64_add_unchecked - add integer to atomic64 variable + * @i: integer value to add + * @v: pointer to type atomic64_unchecked_t + * + * Atomically adds @i to @v. + */ +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v) +{ + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked) + : "+A" (i), "+c" (v) + : : "memory" + ); + return i; +} + +/** * atomic64_sub - subtract the atomic64 variable * @i: integer value to subtract * @v: pointer to type atomic64_t diff -urNp linux-2.6.39.1/arch/x86/include/asm/atomic64_64.h linux-2.6.39.1/arch/x86/include/asm/atomic64_64.h --- linux-2.6.39.1/arch/x86/include/asm/atomic64_64.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/atomic64_64.h 2011-05-22 19:36:30.000000000 -0400 @@ -18,7 +18,19 @@ */ static inline long atomic64_read(const atomic64_t *v) { - return (*(volatile long *)&(v)->counter); + return (*(volatile const long *)&(v)->counter); +} + +/** + * atomic64_read_unchecked - read atomic64 variable + * @v: pointer of type atomic64_unchecked_t + * + * Atomically reads the value of @v. + * Doesn't imply a read memory barrier. + */ +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) +{ + return (*(volatile const long *)&(v)->counter); } /** @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64 } /** + * atomic64_set_unchecked - set atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * @i: required value + * + * Atomically sets the value of @v to @i. 
+ */ +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) +{ + v->counter = i; +} + +/** * atomic64_add - add integer to atomic64 variable * @i: integer value to add * @v: pointer to type atomic64_t @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64 */ static inline void atomic64_add(long i, atomic64_t *v) { + asm volatile(LOCK_PREFIX "addq %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subq %1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); +} + +/** + * atomic64_add_unchecked - add integer to atomic64 variable + * @i: integer value to add + * @v: pointer to type atomic64_unchecked_t + * + * Atomically adds @i to @v. + */ +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v) +{ asm volatile(LOCK_PREFIX "addq %1,%0" : "=m" (v->counter) : "er" (i), "m" (v->counter)); @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, */ static inline void atomic64_sub(long i, atomic64_t *v) { - asm volatile(LOCK_PREFIX "subq %1,%0" + asm volatile(LOCK_PREFIX "subq %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addq %1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); +} + +/** + * atomic64_sub_unchecked - subtract the atomic64 variable + * @i: integer value to subtract + * @v: pointer to type atomic64_unchecked_t + * + * Atomically subtracts @i from @v. + */ +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "subq %1,%0\n" : "=m" (v->counter) : "er" (i), "m" (v->counter)); } @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test( { unsigned char c; - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" + asm volatile(LOCK_PREFIX "subq %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addq %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "=m" (v->counter), "=qm" (c) : "er" (i), "m" (v->counter) : "memory"); return c; @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test( */ static inline void atomic64_inc(atomic64_t *v) { + asm volatile(LOCK_PREFIX "incq %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decq %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "m" (v->counter)); +} + +/** + * atomic64_inc_unchecked - increment atomic64 variable + * @v: pointer to type atomic64_unchecked_t + * + * Atomically increments @v by 1. + */ +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) +{ asm volatile(LOCK_PREFIX "incq %0" : "=m" (v->counter) : "m" (v->counter)); @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64 */ static inline void atomic64_dec(atomic64_t *v) { - asm volatile(LOCK_PREFIX "decq %0" + asm volatile(LOCK_PREFIX "decq %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incq %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=m" (v->counter) + : "m" (v->counter)); +} + +/** + * atomic64_dec_unchecked - decrement atomic64 variable + * @v: pointer to type atomic64_t + * + * Atomically decrements @v by 1. 
+ */ +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "decq %0\n" : "=m" (v->counter) : "m" (v->counter)); } @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test( { unsigned char c; - asm volatile(LOCK_PREFIX "decq %0; sete %1" + asm volatile(LOCK_PREFIX "decq %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incq %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "=m" (v->counter), "=qm" (c) : "m" (v->counter) : "memory"); return c != 0; @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test( { unsigned char c; - asm volatile(LOCK_PREFIX "incq %0; sete %1" + asm volatile(LOCK_PREFIX "incq %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decq %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "=m" (v->counter), "=qm" (c) : "m" (v->counter) : "memory"); return c != 0; @@ -155,7 +292,16 @@ static inline int atomic64_add_negative( { unsigned char c; - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" + asm volatile(LOCK_PREFIX "addq %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subq %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sets %1\n" : "=m" (v->counter), "=qm" (c) : "er" (i), "m" (v->counter) : "memory"); return c; @@ -171,7 +317,31 @@ static inline int atomic64_add_negative( static inline long atomic64_add_return(long i, atomic64_t *v) { long __i = i; - asm volatile(LOCK_PREFIX "xaddq %0, %1;" + asm volatile(LOCK_PREFIX "xaddq %0, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "movq %0, %1\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+r" (i), "+m" (v->counter) + : : "memory"); + return i + __i; +} + +/** + * atomic64_add_return_unchecked - add and return + * @i: integer value to add + * @v: pointer to type atomic64_unchecked_t + * + * Atomically adds @i to @v and returns @i + @v + */ +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) +{ + long __i = i; + asm volatile(LOCK_PREFIX "xaddq %0, %1" : "+r" (i), "+m" (v->counter) : : "memory"); return i + __i; @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l } #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) +{ + return atomic64_add_return_unchecked(1, v); +} #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom return cmpxchg(&v->counter, old, new); } +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new) +{ + return cmpxchg(&v->counter, old, new); +} + static inline long atomic64_xchg(atomic64_t *v, long new) { return xchg(&v->counter, new); @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6 */ static inline int atomic64_add_unless(atomic64_t *v, long a, long u) { - long c, old; + long c, old, new; c = atomic64_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic64_cmpxchg((v), c, c + (a)); + + asm volatile("add %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "sub %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=r" (new) + : "0" (c), "ir" (a)); + + old = atomic64_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) diff -urNp linux-2.6.39.1/arch/x86/include/asm/atomic.h 
linux-2.6.39.1/arch/x86/include/asm/atomic.h --- linux-2.6.39.1/arch/x86/include/asm/atomic.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/atomic.h 2011-05-22 19:36:30.000000000 -0400 @@ -22,7 +22,18 @@ */ static inline int atomic_read(const atomic_t *v) { - return (*(volatile int *)&(v)->counter); + return (*(volatile const int *)&(v)->counter); +} + +/** + * atomic_read_unchecked - read atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically reads the value of @v. + */ +static inline int atomic_read_unchecked(const atomic_unchecked_t *v) +{ + return (*(volatile const int *)&(v)->counter); } /** @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t * } /** + * atomic_set_unchecked - set atomic variable + * @v: pointer of type atomic_unchecked_t + * @i: required value + * + * Atomically sets the value of @v to @i. + */ +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) +{ + v->counter = i; +} + +/** * atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t * */ static inline void atomic_add(int i, atomic_t *v) { - asm volatile(LOCK_PREFIX "addl %1,%0" + asm volatile(LOCK_PREFIX "addl %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subl %1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+m" (v->counter) + : "ir" (i)); +} + +/** + * atomic_add_unchecked - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_unchecked_t + * + * Atomically adds @i to @v. + */ +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "addl %1,%0\n" : "+m" (v->counter) : "ir" (i)); } @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato */ static inline void atomic_sub(int i, atomic_t *v) { - asm volatile(LOCK_PREFIX "subl %1,%0" + asm volatile(LOCK_PREFIX "subl %1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addl %1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+m" (v->counter) + : "ir" (i)); +} + +/** + * atomic_sub_unchecked - subtract integer from atomic variable + * @i: integer value to subtract + * @v: pointer of type atomic_unchecked_t + * + * Atomically subtracts @i from @v. + */ +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "subl %1,%0\n" : "+m" (v->counter) : "ir" (i)); } @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in { unsigned char c; - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" + asm volatile(LOCK_PREFIX "subl %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "addl %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (v->counter), "=qm" (c) : "ir" (i) : "memory"); return c; @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in */ static inline void atomic_inc(atomic_t *v) { - asm volatile(LOCK_PREFIX "incl %0" + asm volatile(LOCK_PREFIX "incl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+m" (v->counter)); +} + +/** + * atomic_inc_unchecked - increment atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically increments @v by 1. 
+ */ +static inline void atomic_inc_unchecked(atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "incl %0\n" : "+m" (v->counter)); } @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t * */ static inline void atomic_dec(atomic_t *v) { - asm volatile(LOCK_PREFIX "decl %0" + asm volatile(LOCK_PREFIX "decl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+m" (v->counter)); +} + +/** + * atomic_dec_unchecked - decrement atomic variable + * @v: pointer of type atomic_unchecked_t + * + * Atomically decrements @v by 1. + */ +static inline void atomic_dec_unchecked(atomic_unchecked_t *v) +{ + asm volatile(LOCK_PREFIX "decl %0\n" : "+m" (v->counter)); } @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at { unsigned char c; - asm volatile(LOCK_PREFIX "decl %0; sete %1" + asm volatile(LOCK_PREFIX "decl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "incl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (v->counter), "=qm" (c) : : "memory"); return c != 0; @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at { unsigned char c; - asm volatile(LOCK_PREFIX "incl %0; sete %1" + asm volatile(LOCK_PREFIX "incl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" + : "+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +} + +/** + * atomic_inc_and_test_unchecked - increment and test + * @v: pointer of type atomic_unchecked_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) +{ + unsigned char c; + + asm volatile(LOCK_PREFIX "incl %0\n" + "sete %1\n" : "+m" (v->counter), "=qm" (c) : : "memory"); return c != 0; @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in { unsigned char c; - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" + asm volatile(LOCK_PREFIX "addl %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subl %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sets %1\n" : "+m" (v->counter), "=qm" (c) : "ir" (i) : "memory"); return c; @@ -180,6 +342,46 @@ static inline int atomic_add_return(int #endif /* Modern 486+ processor */ __i = i; + asm volatile(LOCK_PREFIX "xaddl %0, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "movl %0, %1\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "+r" (i), "+m" (v->counter) + : : "memory"); + return i + __i; + +#ifdef CONFIG_M386 +no_xadd: /* Legacy 386 processor */ + local_irq_save(flags); + __i = atomic_read(v); + atomic_set(v, i + __i); + local_irq_restore(flags); + return i + __i; +#endif +} + +/** + * atomic_add_return_unchecked - add integer and return + * @v: pointer of type atomic_unchecked_t + * @i: integer value to add + * + * Atomically adds @i to @v and returns @i + @v + */ +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) +{ + int __i; +#ifdef CONFIG_M386 + unsigned long flags; + if (unlikely(boot_cpu_data.x86 <= 3)) + goto no_xadd; +#endif + /* Modern 486+ processor */ + __i = i; asm volatile(LOCK_PREFIX "xaddl %0, %1" : "+r" (i), "+m" (v->counter) : : "memory"); @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int } #define atomic_inc_return(v) (atomic_add_return(1, v)) +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) +{ + return atomic_add_return_unchecked(1, v); 
+} #define atomic_dec_return(v) (atomic_sub_return(1, v)) static inline int atomic_cmpxchg(atomic_t *v, int old, int new) @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_ return cmpxchg(&v->counter, old, new); } +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) +{ + return cmpxchg(&v->counter, old, new); +} + static inline int atomic_xchg(atomic_t *v, int new) { return xchg(&v->counter, new); } +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) +{ + return xchg(&v->counter, new); +} + /** * atomic_add_unless - add unless the number is already a given value * @v: pointer of type atomic_t @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t * */ static inline int atomic_add_unless(atomic_t *v, int a, int u) { - int c, old; + int c, old, new; c = atomic_read(v); for (;;) { - if (unlikely(c == (u))) + if (unlikely(c == u)) break; - old = atomic_cmpxchg((v), c, c + (a)); + + asm volatile("addl %2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "subl %2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=r" (new) + : "0" (c), "ir" (a)); + + old = atomic_cmpxchg(v, c, new); if (likely(old == c)) break; c = old; } - return c != (u); + return c != u; } #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) +/** + * atomic_inc_not_zero_hint - increment if not null + * @v: pointer of type atomic_t + * @hint: probable value of the atomic before the increment + * + * This version of atomic_inc_not_zero() gives a hint of probable + * value of the atomic. This helps processor to not read the memory + * before doing the atomic read/modify/write cycle, lowering + * number of bus transactions on some arches. + * + * Returns: 0 if increment was not done, 1 otherwise. + */ +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) +{ + int val, c = hint, new; + + /* sanity test, should be removed by compiler if hint is a constant */ + if (!hint) + return atomic_inc_not_zero(v); + + do { + asm volatile("incl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "decl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + : "=r" (new) + : "0" (c)); + + val = atomic_cmpxchg(v, c, new); + if (val == c) + return 1; + c = val; + } while (c); + + return 0; +} + /* * atomic_dec_if_positive - decrement by 1 if old value positive * @v: pointer of type atomic_t diff -urNp linux-2.6.39.1/arch/x86/include/asm/bitops.h linux-2.6.39.1/arch/x86/include/asm/bitops.h --- linux-2.6.39.1/arch/x86/include/asm/bitops.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/bitops.h 2011-05-22 19:36:30.000000000 -0400 @@ -38,7 +38,7 @@ * a mask operation on a byte. */ #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3)) #define CONST_MASK(nr) (1 << ((nr) & 7)) /** diff -urNp linux-2.6.39.1/arch/x86/include/asm/boot.h linux-2.6.39.1/arch/x86/include/asm/boot.h --- linux-2.6.39.1/arch/x86/include/asm/boot.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/boot.h 2011-05-22 19:36:30.000000000 -0400 @@ -11,10 +11,15 @@ #include /* Physical address where kernel should be loaded. 
*/ -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ + (CONFIG_PHYSICAL_ALIGN - 1)) \ & ~(CONFIG_PHYSICAL_ALIGN - 1)) +#ifndef __ASSEMBLY__ +extern unsigned char __LOAD_PHYSICAL_ADDR[]; +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR) +#endif + /* Minimum kernel alignment, as a power of two */ #ifdef CONFIG_X86_64 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT diff -urNp linux-2.6.39.1/arch/x86/include/asm/cacheflush.h linux-2.6.39.1/arch/x86/include/asm/cacheflush.h --- linux-2.6.39.1/arch/x86/include/asm/cacheflush.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/cacheflush.h 2011-05-22 19:36:30.000000000 -0400 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem unsigned long pg_flags = pg->flags & _PGMT_MASK; if (pg_flags == _PGMT_DEFAULT) - return -1; + return ~0UL; else if (pg_flags == _PGMT_WC) return _PAGE_CACHE_WC; else if (pg_flags == _PGMT_UC_MINUS) diff -urNp linux-2.6.39.1/arch/x86/include/asm/cache.h linux-2.6.39.1/arch/x86/include/asm/cache.h --- linux-2.6.39.1/arch/x86/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/cache.h 2011-05-22 19:36:30.000000000 -0400 @@ -5,12 +5,13 @@ /* L1 cache line size */ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_BYTES (_AC(1,U) << L1_CACHE_SHIFT) #define __read_mostly __attribute__((__section__(".data..read_mostly"))) +#define __read_only __attribute__((__section__(".data..read_only"))) #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) +#define INTERNODE_CACHE_BYTES (_AC(1,U) << INTERNODE_CACHE_SHIFT) #ifdef CONFIG_X86_VSMP #ifdef CONFIG_SMP diff -urNp linux-2.6.39.1/arch/x86/include/asm/checksum_32.h linux-2.6.39.1/arch/x86/include/asm/checksum_32.h --- linux-2.6.39.1/arch/x86/include/asm/checksum_32.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/checksum_32.h 2011-05-22 19:36:30.000000000 -0400 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); + +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); + /* * Note: when you get a NULL pointer exception here this means someone * passed in an incorrect kernel address to one of these functions. 
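
The cache.h hunk above adds a __read_only attribute that places data in the .data..read_only section so KERNEXEC can write-protect it after init. As a minimal illustrative sketch (not part of the patch; the variable and helper names are hypothetical), a variable marked this way would be updated through the pax_open_kernel()/pax_close_kernel() pair that this patch adds elsewhere:

/* hypothetical example, not taken from the patch */
static int example_tunable __read_only = 1;	/* placed in .data..read_only */

static void set_example_tunable(int val)
{
	pax_open_kernel();	/* KERNEXEC: temporarily permit writes to protected kernel data */
	example_tunable = val;
	pax_close_kernel();	/* restore write protection */
}

When CONFIG_PAX_KERNEXEC is not set, both helpers are defined later in this patch as no-ops, so the same call sequence compiles away on unprotected configurations.
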
@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f int *err_ptr) { might_sleep(); - return csum_partial_copy_generic((__force void *)src, dst, + return csum_partial_copy_generic_from_user((__force void *)src, dst, len, sum, err_ptr, NULL); } @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us { might_sleep(); if (access_ok(VERIFY_WRITE, dst, len)) - return csum_partial_copy_generic(src, (__force void *)dst, + return csum_partial_copy_generic_to_user(src, (__force void *)dst, len, sum, NULL, err_ptr); if (len) diff -urNp linux-2.6.39.1/arch/x86/include/asm/cpufeature.h linux-2.6.39.1/arch/x86/include/asm/cpufeature.h --- linux-2.6.39.1/arch/x86/include/asm/cpufeature.h 2011-06-03 00:04:13.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/cpufeature.h 2011-06-03 00:32:04.000000000 -0400 @@ -351,7 +351,7 @@ static __always_inline __pure bool __sta ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ ".previous\n" - ".section .altinstr_replacement,\"ax\"\n" + ".section .altinstr_replacement,\"a\"\n" "3: movb $1,%0\n" "4:\n" ".previous\n" diff -urNp linux-2.6.39.1/arch/x86/include/asm/desc_defs.h linux-2.6.39.1/arch/x86/include/asm/desc_defs.h --- linux-2.6.39.1/arch/x86/include/asm/desc_defs.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/desc_defs.h 2011-05-22 19:36:30.000000000 -0400 @@ -31,6 +31,12 @@ struct desc_struct { unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; }; + struct { + u16 offset_low; + u16 seg; + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1; + unsigned offset_high: 16; + } gate; }; } __attribute__((packed)); diff -urNp linux-2.6.39.1/arch/x86/include/asm/desc.h linux-2.6.39.1/arch/x86/include/asm/desc.h --- linux-2.6.39.1/arch/x86/include/asm/desc.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/desc.h 2011-05-22 19:36:30.000000000 -0400 @@ -4,6 +4,7 @@ #include #include #include +#include #include static inline void fill_ldt(struct desc_struct *desc, @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_ desc->base1 = (info->base_addr & 0x00ff0000) >> 16; desc->type = (info->read_exec_only ^ 1) << 1; desc->type |= info->contents << 2; + desc->type |= info->seg_not_present ^ 1; desc->s = 1; desc->dpl = 0x3; desc->p = info->seg_not_present ^ 1; @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_ } extern struct desc_ptr idt_descr; -extern gate_desc idt_table[]; - -struct gdt_page { - struct desc_struct gdt[GDT_ENTRIES]; -} __attribute__((aligned(PAGE_SIZE))); -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); +extern gate_desc idt_table[256]; +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)]; static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) { - return per_cpu(gdt_page, cpu).gdt; + return cpu_gdt_table[cpu]; } #ifdef CONFIG_X86_64 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc * unsigned long base, unsigned dpl, unsigned flags, unsigned short seg) { - gate->a = (seg << 16) | (base & 0xffff); - gate->b = (base & 0xffff0000) | - (((0x80 | type | (dpl << 5)) & 0xff) << 8); + gate->gate.offset_low = base; + gate->gate.seg = seg; + gate->gate.reserved = 0; + gate->gate.type = type; + gate->gate.s = 0; + gate->gate.dpl = dpl; + gate->gate.p = 1; + gate->gate.offset_high = base >> 16; } #endif @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str static inline void native_write_idt_entry(gate_desc *idt, 
int entry, const gate_desc *gate) { + pax_open_kernel(); memcpy(&idt[entry], gate, sizeof(*gate)); + pax_close_kernel(); } static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) { + pax_open_kernel(); memcpy(&ldt[entry], desc, 8); + pax_close_kernel(); } static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry, @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr size = sizeof(struct desc_struct); break; } + + pax_open_kernel(); memcpy(&gdt[entry], desc, size); + pax_close_kernel(); } static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, @@ -211,7 +221,9 @@ static inline void native_set_ldt(const static inline void native_load_tr_desc(void) { + pax_open_kernel(); asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); + pax_close_kernel(); } static inline void native_load_gdt(const struct desc_ptr *dtr) @@ -246,8 +258,10 @@ static inline void native_load_tls(struc unsigned int i; struct desc_struct *gdt = get_cpu_gdt_table(cpu); + pax_open_kernel(); for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; + pax_close_kernel(); } #define _LDT_empty(info) \ @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc->limit = (limit >> 16) & 0xf; } -static inline void _set_gate(int gate, unsigned type, void *addr, +static inline void _set_gate(int gate, unsigned type, const void *addr, unsigned dpl, unsigned ist, unsigned seg) { gate_desc s; @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u * Pentium F0 0F bugfix can have resulted in the mapped * IDT being write-protected. */ -static inline void set_intr_gate(unsigned int n, void *addr) +static inline void set_intr_gate(unsigned int n, const void *addr) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig /* * This routine sets up an interrupt gate at directory privilege level 3. 
*/ -static inline void set_system_intr_gate(unsigned int n, void *addr) +static inline void set_system_intr_gate(unsigned int n, const void *addr) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS); } -static inline void set_system_trap_gate(unsigned int n, void *addr) +static inline void set_system_trap_gate(unsigned int n, const void *addr) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS); } -static inline void set_trap_gate(unsigned int n, void *addr) +static inline void set_trap_gate(unsigned int n, const void *addr) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS); @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne static inline void set_task_gate(unsigned int n, unsigned int gdt_entry) { BUG_ON((unsigned)n > 0xFF); - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3)); + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3)); } -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist) +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS); } -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist) +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist) { BUG_ON((unsigned)n > 0xFF); _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS); } +#ifdef CONFIG_X86_32 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu) +{ + struct desc_struct d; + + if (likely(limit)) + limit = (limit - 1UL) >> PAGE_SHIFT; + pack_descriptor(&d, base, limit, 0xFB, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S); +} +#endif + #endif /* _ASM_X86_DESC_H */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/device.h linux-2.6.39.1/arch/x86/include/asm/device.h --- linux-2.6.39.1/arch/x86/include/asm/device.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/device.h 2011-05-22 19:36:30.000000000 -0400 @@ -6,7 +6,7 @@ struct dev_archdata { void *acpi_handle; #endif #ifdef CONFIG_X86_64 -struct dma_map_ops *dma_ops; + const struct dma_map_ops *dma_ops; #endif #if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU) void *iommu; /* hook for IOMMU specific extension */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/dma-mapping.h linux-2.6.39.1/arch/x86/include/asm/dma-mapping.h --- linux-2.6.39.1/arch/x86/include/asm/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/dma-mapping.h 2011-05-22 19:36:30.000000000 -0400 @@ -26,9 +26,9 @@ extern int iommu_merge; extern struct device x86_dma_fallback_dev; extern int panic_on_overflow; -extern struct dma_map_ops *dma_ops; +extern const struct dma_map_ops *dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { #ifdef CONFIG_X86_32 return dma_ops; @@ -45,7 +45,7 @@ static inline struct dma_map_ops *get_dm /* Make sure we keep the same behaviour */ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); if (ops->mapping_error) return ops->mapping_error(dev, dma_addr); @@ -115,7 +115,7 @@ static inline void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { - struct dma_map_ops *ops = get_dma_ops(dev); + 
const struct dma_map_ops *ops = get_dma_ops(dev); void *memory; gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); @@ -142,7 +142,7 @@ dma_alloc_coherent(struct device *dev, s static inline void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t bus) { - struct dma_map_ops *ops = get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); WARN_ON(irqs_disabled()); /* for portability */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/e820.h linux-2.6.39.1/arch/x86/include/asm/e820.h --- linux-2.6.39.1/arch/x86/include/asm/e820.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/e820.h 2011-05-22 19:36:30.000000000 -0400 @@ -69,7 +69,7 @@ struct e820map { #define ISA_START_ADDRESS 0xa0000 #define ISA_END_ADDRESS 0x100000 -#define BIOS_BEGIN 0x000a0000 +#define BIOS_BEGIN 0x000c0000 #define BIOS_END 0x00100000 #define BIOS_ROM_BASE 0xffe00000 diff -urNp linux-2.6.39.1/arch/x86/include/asm/elf.h linux-2.6.39.1/arch/x86/include/asm/elf.h --- linux-2.6.39.1/arch/x86/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/elf.h 2011-05-22 19:36:30.000000000 -0400 @@ -237,7 +237,25 @@ extern int force_personality32; the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ +#ifdef CONFIG_PAX_SEGMEXEC +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2) +#else #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#endif + +#ifdef CONFIG_PAX_ASLR +#ifdef CONFIG_X86_32 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL + +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) +#else +#define PAX_ELF_ET_DYN_BASE 0x400000UL + +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) +#endif +#endif /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. 
This could be done in user space, @@ -291,8 +309,7 @@ do { \ #define ARCH_DLINFO \ do { \ if (vdso_enabled) \ - NEW_AUX_ENT(AT_SYSINFO_EHDR, \ - (unsigned long)current->mm->context.vdso); \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\ } while (0) #define AT_SYSINFO 32 @@ -303,7 +320,7 @@ do { \ #endif /* !CONFIG_X86_32 */ -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) +#define VDSO_CURRENT_BASE (current->mm->context.vdso) #define VDSO_ENTRY \ ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) @@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s extern int syscall32_setup_pages(struct linux_binprm *, int exstack); #define compat_arch_setup_additional_pages syscall32_setup_pages -extern unsigned long arch_randomize_brk(struct mm_struct *mm); -#define arch_randomize_brk arch_randomize_brk - #endif /* _ASM_X86_ELF_H */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/emergency-restart.h linux-2.6.39.1/arch/x86/include/asm/emergency-restart.h --- linux-2.6.39.1/arch/x86/include/asm/emergency-restart.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/emergency-restart.h 2011-05-22 19:36:30.000000000 -0400 @@ -15,6 +15,6 @@ enum reboot_type { extern enum reboot_type reboot_type; -extern void machine_emergency_restart(void); +extern void machine_emergency_restart(void) __noreturn; #endif /* _ASM_X86_EMERGENCY_RESTART_H */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/futex.h linux-2.6.39.1/arch/x86/include/asm/futex.h --- linux-2.6.39.1/arch/x86/include/asm/futex.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/futex.h 2011-05-22 19:36:30.000000000 -0400 @@ -12,16 +12,18 @@ #include #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ + typecheck(u32 *, uaddr); \ asm volatile("1:\t" insn "\n" \ "2:\t.section .fixup,\"ax\"\n" \ "3:\tmov\t%3, %1\n" \ "\tjmp\t2b\n" \ "\t.previous\n" \ _ASM_EXTABLE(1b, 3b) \ - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\ : "i" (-EFAULT), "0" (oparg), "1" (0)) #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ + typecheck(u32 *, uaddr); \ asm volatile("1:\tmovl %2, %0\n" \ "\tmovl\t%0, %3\n" \ "\t" insn "\n" \ @@ -34,7 +36,7 @@ _ASM_EXTABLE(1b, 4b) \ _ASM_EXTABLE(2b, 4b) \ : "=&a" (oldval), "=&r" (ret), \ - "+m" (*uaddr), "=&r" (tem) \ + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \ : "r" (oparg), "i" (-EFAULT), "1" (0)) static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser switch (op) { case FUTEX_OP_SET: - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg); break; case FUTEX_OP_ADD: - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval, uaddr, oparg); break; case FUTEX_OP_OR: @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n" "2:\t.section .fixup, \"ax\"\n" "3:\tmov %3, %0\n" "\tjmp 2b\n" "\t.previous\n" _ASM_EXTABLE(1b, 3b) - : "+r" (ret), "=a" (oldval), "+m" (*uaddr) + : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr)) : "i" (-EFAULT), "r" (newval), "1" (oldval) : "memory" ); diff -urNp 
linux-2.6.39.1/arch/x86/include/asm/hw_irq.h linux-2.6.39.1/arch/x86/include/asm/hw_irq.h --- linux-2.6.39.1/arch/x86/include/asm/hw_irq.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/hw_irq.h 2011-05-22 19:36:30.000000000 -0400 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void); extern void enable_IO_APIC(void); /* Statistics */ -extern atomic_t irq_err_count; -extern atomic_t irq_mis_count; +extern atomic_unchecked_t irq_err_count; +extern atomic_unchecked_t irq_mis_count; /* EISA */ extern void eisa_set_level_irq(unsigned int irq); diff -urNp linux-2.6.39.1/arch/x86/include/asm/i387.h linux-2.6.39.1/arch/x86/include/asm/i387.h --- linux-2.6.39.1/arch/x86/include/asm/i387.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/i387.h 2011-05-22 19:36:30.000000000 -0400 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc { int err; +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if ((unsigned long)fx < PAX_USER_SHADOW_BASE) + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE); +#endif + /* See comment in fxsave() below. */ #ifdef CONFIG_AS_FXSAVEQ asm volatile("1: fxrstorq %[fx]\n\t" @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38 { int err; +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if ((unsigned long)fx < PAX_USER_SHADOW_BASE) + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE); +#endif + /* * Clear the bytes not touched by the fxsave and reserved * for the SW usage. @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu #endif /* CONFIG_X86_64 */ /* We need a safe address that is cheap to find and that is already - in L1 during context switch. The best choices are unfortunately - different for UP and SMP */ -#ifdef CONFIG_SMP -#define safe_address (__per_cpu_offset[0]) -#else -#define safe_address (kstat_cpu(0).cpustat.user) -#endif + in L1 during context switch. */ +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0) /* * These must be called with preempt disabled @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void struct thread_info *me = current_thread_info(); preempt_disable(); if (me->status & TS_USEDFPU) - __save_init_fpu(me->task); + __save_init_fpu(current); else clts(); } diff -urNp linux-2.6.39.1/arch/x86/include/asm/io.h linux-2.6.39.1/arch/x86/include/asm/io.h --- linux-2.6.39.1/arch/x86/include/asm/io.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/io.h 2011-05-22 19:36:30.000000000 -0400 @@ -216,6 +216,17 @@ extern void set_iounmap_nonlazy(void); #include +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE +static inline int valid_phys_addr_range(unsigned long addr, size_t count) +{ + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0; +} + +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count) +{ + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 
1 : 0; +} + /* * Convert a virtual cached pointer to an uncached pointer */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/iommu.h linux-2.6.39.1/arch/x86/include/asm/iommu.h --- linux-2.6.39.1/arch/x86/include/asm/iommu.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/iommu.h 2011-05-22 19:36:30.000000000 -0400 @@ -1,7 +1,7 @@ #ifndef _ASM_X86_IOMMU_H #define _ASM_X86_IOMMU_H -extern struct dma_map_ops nommu_dma_ops; +extern const struct dma_map_ops nommu_dma_ops; extern int force_iommu, no_iommu; extern int iommu_detected; extern int iommu_pass_through; diff -urNp linux-2.6.39.1/arch/x86/include/asm/irqflags.h linux-2.6.39.1/arch/x86/include/asm/irqflags.h --- linux-2.6.39.1/arch/x86/include/asm/irqflags.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/irqflags.h 2011-05-22 19:36:30.000000000 -0400 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i sti; \ sysexit +#define GET_CR0_INTO_RDI mov %cr0, %rdi +#define SET_RDI_INTO_CR0 mov %rdi, %cr0 +#define GET_CR3_INTO_RDI mov %cr3, %rdi +#define SET_RDI_INTO_CR3 mov %rdi, %cr3 + #else #define INTERRUPT_RETURN iret #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit diff -urNp linux-2.6.39.1/arch/x86/include/asm/kprobes.h linux-2.6.39.1/arch/x86/include/asm/kprobes.h --- linux-2.6.39.1/arch/x86/include/asm/kprobes.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/kprobes.h 2011-05-22 19:36:30.000000000 -0400 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t; #define RELATIVEJUMP_SIZE 5 #define RELATIVECALL_OPCODE 0xe8 #define RELATIVE_ADDR_SIZE 4 -#define MAX_STACK_SIZE 64 -#define MIN_STACK_SIZE(ADDR) \ - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ - THREAD_SIZE - (unsigned long)(ADDR))) \ - ? (MAX_STACK_SIZE) \ - : (((unsigned long)current_thread_info()) + \ - THREAD_SIZE - (unsigned long)(ADDR))) +#define MAX_STACK_SIZE 64UL +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR)) #define flush_insn_slot(p) do { } while (0) diff -urNp linux-2.6.39.1/arch/x86/include/asm/kvm_host.h linux-2.6.39.1/arch/x86/include/asm/kvm_host.h --- linux-2.6.39.1/arch/x86/include/asm/kvm_host.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/kvm_host.h 2011-05-22 19:36:30.000000000 -0400 @@ -419,7 +419,7 @@ struct kvm_arch { unsigned int n_used_mmu_pages; unsigned int n_requested_mmu_pages; unsigned int n_max_mmu_pages; - atomic_t invlpg_counter; + atomic_unchecked_t invlpg_counter; struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; /* * Hash table of struct kvm_mmu_page. 
@@ -599,7 +599,7 @@ struct kvm_arch_async_pf { bool direct_map; }; -extern struct kvm_x86_ops *kvm_x86_ops; +extern const struct kvm_x86_ops *kvm_x86_ops; int kvm_mmu_module_init(void); void kvm_mmu_module_exit(void); diff -urNp linux-2.6.39.1/arch/x86/include/asm/local.h linux-2.6.39.1/arch/x86/include/asm/local.h --- linux-2.6.39.1/arch/x86/include/asm/local.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/local.h 2011-05-22 19:36:30.000000000 -0400 @@ -18,26 +18,58 @@ typedef struct { static inline void local_inc(local_t *l) { - asm volatile(_ASM_INC "%0" + asm volatile(_ASM_INC "%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_DEC "%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+m" (l->a.counter)); } static inline void local_dec(local_t *l) { - asm volatile(_ASM_DEC "%0" + asm volatile(_ASM_DEC "%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_INC "%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+m" (l->a.counter)); } static inline void local_add(long i, local_t *l) { - asm volatile(_ASM_ADD "%1,%0" + asm volatile(_ASM_ADD "%1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_SUB "%1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+m" (l->a.counter) : "ir" (i)); } static inline void local_sub(long i, local_t *l) { - asm volatile(_ASM_SUB "%1,%0" + asm volatile(_ASM_SUB "%1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_ADD "%1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+m" (l->a.counter) : "ir" (i)); } @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon { unsigned char c; - asm volatile(_ASM_SUB "%2,%0; sete %1" + asm volatile(_ASM_SUB "%2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_ADD "%2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (l->a.counter), "=qm" (c) : "ir" (i) : "memory"); return c; @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc { unsigned char c; - asm volatile(_ASM_DEC "%0; sete %1" + asm volatile(_ASM_DEC "%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_INC "%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (l->a.counter), "=qm" (c) : : "memory"); return c != 0; @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc { unsigned char c; - asm volatile(_ASM_INC "%0; sete %1" + asm volatile(_ASM_INC "%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_DEC "%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sete %1\n" : "+m" (l->a.counter), "=qm" (c) : : "memory"); return c != 0; @@ -110,7 +169,16 @@ static inline int local_add_negative(lon { unsigned char c; - asm volatile(_ASM_ADD "%2,%0; sets %1" + asm volatile(_ASM_ADD "%2,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_SUB "%2,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + "sets %1\n" : "+m" (l->a.counter), "=qm" (c) : "ir" (i) : "memory"); return c; @@ -133,7 +201,15 @@ static inline long local_add_return(long #endif /* Modern 486+ processor */ __i = i; - asm volatile(_ASM_XADD "%0, %1;" + asm volatile(_ASM_XADD "%0, %1\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + _ASM_MOV "%0,%1\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+r" (i), "+m" (l->a.counter) : : "memory"); return i + __i; diff -urNp linux-2.6.39.1/arch/x86/include/asm/mce.h linux-2.6.39.1/arch/x86/include/asm/mce.h --- linux-2.6.39.1/arch/x86/include/asm/mce.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/mce.h 2011-05-22 19:36:30.000000000 -0400 @@ -198,7 +198,7 @@ int 
mce_notify_irq(void); void mce_notify_process(void); DECLARE_PER_CPU(struct mce, injectm); -extern struct file_operations mce_chrdev_ops; +extern struct file_operations mce_chrdev_ops; /* cannot be const, see arch/x86/kernel/cpu/mcheck/mce. */ /* * Exception handler diff -urNp linux-2.6.39.1/arch/x86/include/asm/microcode.h linux-2.6.39.1/arch/x86/include/asm/microcode.h --- linux-2.6.39.1/arch/x86/include/asm/microcode.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/microcode.h 2011-05-22 19:36:30.000000000 -0400 @@ -12,13 +12,13 @@ struct device; enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; struct microcode_ops { - enum ucode_state (*request_microcode_user) (int cpu, + enum ucode_state (* const request_microcode_user) (int cpu, const void __user *buf, size_t size); - enum ucode_state (*request_microcode_fw) (int cpu, + enum ucode_state (* const request_microcode_fw) (int cpu, struct device *device); - void (*microcode_fini_cpu) (int cpu); + void (* const microcode_fini_cpu) (int cpu); /* * The generic 'microcode_core' part guarantees that @@ -38,16 +38,16 @@ struct ucode_cpu_info { extern struct ucode_cpu_info ucode_cpu_info[]; #ifdef CONFIG_MICROCODE_INTEL -extern struct microcode_ops * __init init_intel_microcode(void); +extern const struct microcode_ops * __init init_intel_microcode(void); #else -static inline struct microcode_ops * __init init_intel_microcode(void) +static inline const struct microcode_ops * __init init_intel_microcode(void) { return NULL; } #endif /* CONFIG_MICROCODE_INTEL */ #ifdef CONFIG_MICROCODE_AMD -extern struct microcode_ops * __init init_amd_microcode(void); +extern const struct microcode_ops * __init init_amd_microcode(void); static inline void get_ucode_data(void *to, const u8 *from, size_t n) { @@ -55,7 +55,7 @@ static inline void get_ucode_data(void * } #else -static inline struct microcode_ops * __init init_amd_microcode(void) +static inline const struct microcode_ops * __init init_amd_microcode(void) { return NULL; } diff -urNp linux-2.6.39.1/arch/x86/include/asm/mman.h linux-2.6.39.1/arch/x86/include/asm/mman.h --- linux-2.6.39.1/arch/x86/include/asm/mman.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/mman.h 2011-05-22 19:36:30.000000000 -0400 @@ -5,4 +5,14 @@ #include +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +#ifdef CONFIG_X86_32 +#define arch_mmap_check i386_mmap_check +int i386_mmap_check(unsigned long addr, unsigned long len, + unsigned long flags); +#endif +#endif +#endif + #endif /* _ASM_X86_MMAN_H */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/mmu_context.h linux-2.6.39.1/arch/x86/include/asm/mmu_context.h --- linux-2.6.39.1/arch/x86/include/asm/mmu_context.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/mmu_context.h 2011-05-22 19:36:30.000000000 -0400 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + unsigned int i; + pgd_t *pgd; + + pax_open_kernel(); + pgd = get_cpu_pgd(smp_processor_id()); + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i) + if (paravirt_enabled()) + set_pgd(pgd+i, native_make_pgd(0)); + else + pgd[i] = native_make_pgd(0); + pax_close_kernel(); +#endif + #ifdef CONFIG_SMP if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s struct task_struct *tsk) 
{ unsigned cpu = smp_processor_id(); +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) + int tlbstate = TLBSTATE_OK; +#endif if (likely(prev != next)) { #ifdef CONFIG_SMP +#ifdef CONFIG_X86_32 + tlbstate = percpu_read(cpu_tlbstate.state); +#endif percpu_write(cpu_tlbstate.state, TLBSTATE_OK); percpu_write(cpu_tlbstate.active_mm, next); #endif cpumask_set_cpu(cpu, mm_cpumask(next)); /* Re-load page tables */ +#ifdef CONFIG_PAX_PER_CPU_PGD + pax_open_kernel(); + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS); + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS); + pax_close_kernel(); + load_cr3(get_cpu_pgd(cpu)); +#else load_cr3(next->pgd); +#endif /* stop flush ipis for the previous mm */ cpumask_clear_cpu(cpu, mm_cpumask(prev)); @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s */ if (unlikely(prev->context.ldt != next->context.ldt)) load_LDT_nolock(&next->context); - } + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + if (!(__supported_pte_mask & _PAGE_NX)) { + smp_mb__before_clear_bit(); + cpu_clear(cpu, prev->context.cpu_user_cs_mask); + smp_mb__after_clear_bit(); + cpu_set(cpu, next->context.cpu_user_cs_mask); + } +#endif + +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base || + prev->context.user_cs_limit != next->context.user_cs_limit)) + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); #ifdef CONFIG_SMP + else if (unlikely(tlbstate != TLBSTATE_OK)) + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); +#endif +#endif + + } else { + +#ifdef CONFIG_PAX_PER_CPU_PGD + pax_open_kernel(); + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS); + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS); + pax_close_kernel(); + load_cr3(get_cpu_pgd(cpu)); +#endif + +#ifdef CONFIG_SMP percpu_write(cpu_tlbstate.state, TLBSTATE_OK); BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s * tlb flush IPI delivery. We must reload CR3 * to make sure to use no freed page tables. */ + +#ifndef CONFIG_PAX_PER_CPU_PGD load_cr3(next->pgd); +#endif + load_LDT_nolock(&next->context); + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) + if (!(__supported_pte_mask & _PAGE_NX)) + cpu_set(cpu, next->context.cpu_user_cs_mask); +#endif + +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) +#ifdef CONFIG_PAX_PAGEEXEC + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX))) +#endif + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); +#endif + } - } #endif + } } #define activate_mm(prev, next) \ diff -urNp linux-2.6.39.1/arch/x86/include/asm/mmu.h linux-2.6.39.1/arch/x86/include/asm/mmu.h --- linux-2.6.39.1/arch/x86/include/asm/mmu.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/mmu.h 2011-05-22 19:36:30.000000000 -0400 @@ -9,10 +9,22 @@ * we put the segment information here. 
*/ typedef struct { - void *ldt; + struct desc_struct *ldt; int size; struct mutex lock; - void *vdso; + unsigned long vdso; + +#ifdef CONFIG_X86_32 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + unsigned long user_cs_base; + unsigned long user_cs_limit; + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + cpumask_t cpu_user_cs_mask; +#endif + +#endif +#endif #ifdef CONFIG_X86_64 /* True if mm supports a task running in 32 bit compatibility mode. */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/module.h linux-2.6.39.1/arch/x86/include/asm/module.h --- linux-2.6.39.1/arch/x86/include/asm/module.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/module.h 2011-05-22 19:41:32.000000000 -0400 @@ -5,6 +5,7 @@ #ifdef CONFIG_X86_64 /* X86_64 does not define MODULE_PROC_FAMILY */ +#define MODULE_PROC_FAMILY "" #elif defined CONFIG_M386 #define MODULE_PROC_FAMILY "386 " #elif defined CONFIG_M486 @@ -59,8 +60,30 @@ #error unknown processor family #endif -#ifdef CONFIG_X86_32 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY +#ifdef CONFIG_PAX_MEMORY_UDEREF +#define MODULE_PAX_UDEREF "UDEREF " +#else +#define MODULE_PAX_UDEREF "" +#endif + +#ifdef CONFIG_PAX_KERNEXEC +#define MODULE_PAX_KERNEXEC "KERNEXEC " +#else +#define MODULE_PAX_KERNEXEC "" #endif +#ifdef CONFIG_PAX_REFCOUNT +#define MODULE_PAX_REFCOUNT "REFCOUNT " +#else +#define MODULE_PAX_REFCOUNT "" +#endif + +#ifdef CONFIG_GRKERNSEC +#define MODULE_GRSEC "GRSECURITY " +#else +#define MODULE_GRSEC "" +#endif + +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT + #endif /* _ASM_X86_MODULE_H */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/page_64_types.h linux-2.6.39.1/arch/x86/include/asm/page_64_types.h --- linux-2.6.39.1/arch/x86/include/asm/page_64_types.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/page_64_types.h 2011-05-22 19:36:30.000000000 -0400 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from); /* duplicated to the one in bootmem.h */ extern unsigned long max_pfn; -extern unsigned long phys_base; +extern const unsigned long phys_base; extern unsigned long __phys_addr(unsigned long); #define __phys_reloc_hide(x) (x) diff -urNp linux-2.6.39.1/arch/x86/include/asm/paravirt.h linux-2.6.39.1/arch/x86/include/asm/paravirt.h --- linux-2.6.39.1/arch/x86/include/asm/paravirt.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/paravirt.h 2011-05-22 19:36:30.000000000 -0400 @@ -739,6 +739,21 @@ static inline void __set_fixmap(unsigned pv_mmu_ops.set_fixmap(idx, phys, flags); } +#ifdef CONFIG_PAX_KERNEXEC +static inline unsigned long pax_open_kernel(void) +{ + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel); +} + +static inline unsigned long pax_close_kernel(void) +{ + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel); +} +#else +static inline unsigned long pax_open_kernel(void) { return 0; } +static inline unsigned long pax_close_kernel(void) { return 0; } +#endif + #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) static inline int arch_spin_is_locked(struct arch_spinlock *lock) @@ -955,7 +970,7 @@ extern void default_banner(void); #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) -#define PARA_INDIRECT(addr) *%cs:addr +#define PARA_INDIRECT(addr) *%ss:addr #endif #define INTERRUPT_RETURN \ @@ -1032,6 +1047,21 @@ extern 
void default_banner(void); PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ CLBR_NONE, \ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) + +#define GET_CR0_INTO_RDI \ + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ + mov %rax,%rdi + +#define SET_RDI_INTO_CR0 \ + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) + +#define GET_CR3_INTO_RDI \ + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \ + mov %rax,%rdi + +#define SET_RDI_INTO_CR3 \ + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3) + #endif /* CONFIG_X86_32 */ #endif /* __ASSEMBLY__ */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/paravirt_types.h linux-2.6.39.1/arch/x86/include/asm/paravirt_types.h --- linux-2.6.39.1/arch/x86/include/asm/paravirt_types.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/paravirt_types.h 2011-05-22 19:36:30.000000000 -0400 @@ -317,6 +317,12 @@ struct pv_mmu_ops { an mfn. We can tell which is which from the index. */ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags); + +#ifdef CONFIG_PAX_KERNEXEC + unsigned long (*pax_open_kernel)(void); + unsigned long (*pax_close_kernel)(void); +#endif + }; struct arch_spinlock; diff -urNp linux-2.6.39.1/arch/x86/include/asm/pci_x86.h linux-2.6.39.1/arch/x86/include/asm/pci_x86.h --- linux-2.6.39.1/arch/x86/include/asm/pci_x86.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/pci_x86.h 2011-05-22 19:36:30.000000000 -0400 @@ -93,16 +93,16 @@ extern int (*pcibios_enable_irq)(struct extern void (*pcibios_disable_irq)(struct pci_dev *dev); struct pci_raw_ops { - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 *val); - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn, + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 val); }; -extern struct pci_raw_ops *raw_pci_ops; -extern struct pci_raw_ops *raw_pci_ext_ops; +extern const struct pci_raw_ops *raw_pci_ops; +extern const struct pci_raw_ops *raw_pci_ext_ops; -extern struct pci_raw_ops pci_direct_conf1; +extern const struct pci_raw_ops pci_direct_conf1; extern bool port_cf9_safe; /* arch_initcall level */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/pgalloc.h linux-2.6.39.1/arch/x86/include/asm/pgalloc.h --- linux-2.6.39.1/arch/x86/include/asm/pgalloc.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/pgalloc.h 2011-05-22 19:36:30.000000000 -0400 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s pmd_t *pmd, pte_t *pte) { paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); +} + +static inline void pmd_populate_user(struct mm_struct *mm, + pmd_t *pmd, pte_t *pte) +{ + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); } diff -urNp linux-2.6.39.1/arch/x86/include/asm/pgtable-2level.h linux-2.6.39.1/arch/x86/include/asm/pgtable-2level.h --- linux-2.6.39.1/arch/x86/include/asm/pgtable-2level.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/pgtable-2level.h 2011-05-22 19:36:30.000000000 -0400 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { + pax_open_kernel(); *pmdp = pmd; + pax_close_kernel(); } static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) diff -urNp 
linux-2.6.39.1/arch/x86/include/asm/pgtable_32.h linux-2.6.39.1/arch/x86/include/asm/pgtable_32.h --- linux-2.6.39.1/arch/x86/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/pgtable_32.h 2011-05-22 19:36:30.000000000 -0400 @@ -25,9 +25,6 @@ struct mm_struct; struct vm_area_struct; -extern pgd_t swapper_pg_dir[1024]; -extern pgd_t initial_page_table[1024]; - static inline void pgtable_cache_init(void) { } static inline void check_pgt_cache(void) { } void paging_init(void); @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u # include #endif +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +extern pgd_t initial_page_table[PTRS_PER_PGD]; +#ifdef CONFIG_X86_PAE +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD]; +#endif + #if defined(CONFIG_HIGHPTE) #define pte_offset_map(dir, address) \ ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \ @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u /* Clear a kernel PTE and flush it from the TLB */ #define kpte_clear_flush(ptep, vaddr) \ do { \ + pax_open_kernel(); \ pte_clear(&init_mm, (vaddr), (ptep)); \ + pax_close_kernel(); \ __flush_tlb_one((vaddr)); \ } while (0) @@ -74,6 +79,9 @@ do { \ #endif /* !__ASSEMBLY__ */ +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + /* * kern_addr_valid() is (1) for FLATMEM and (0) for * SPARSEMEM and DISCONTIGMEM diff -urNp linux-2.6.39.1/arch/x86/include/asm/pgtable_32_types.h linux-2.6.39.1/arch/x86/include/asm/pgtable_32_types.h --- linux-2.6.39.1/arch/x86/include/asm/pgtable_32_types.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/pgtable_32_types.h 2011-05-22 19:36:30.000000000 -0400 @@ -8,7 +8,7 @@ */ #ifdef CONFIG_X86_PAE # include -# define PMD_SIZE (1UL << PMD_SHIFT) +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) # define PMD_MASK (~(PMD_SIZE - 1)) #else # include @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) #endif +#ifdef CONFIG_PAX_KERNEXEC +#ifndef __ASSEMBLY__ +extern unsigned char MODULES_EXEC_VADDR[]; +extern unsigned char MODULES_EXEC_END[]; +#endif +#include +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET) +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET) +#else +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) +#endif + #define MODULES_VADDR VMALLOC_START #define MODULES_END VMALLOC_END #define MODULES_LEN (MODULES_VADDR - MODULES_END) diff -urNp linux-2.6.39.1/arch/x86/include/asm/pgtable-3level.h linux-2.6.39.1/arch/x86/include/asm/pgtable-3level.h --- linux-2.6.39.1/arch/x86/include/asm/pgtable-3level.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/pgtable-3level.h 2011-05-22 19:36:30.000000000 -0400 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { + pax_open_kernel(); set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); + pax_close_kernel(); } static inline void native_set_pud(pud_t *pudp, pud_t pud) { + pax_open_kernel(); set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); + pax_close_kernel(); } /* diff -urNp linux-2.6.39.1/arch/x86/include/asm/pgtable_64.h linux-2.6.39.1/arch/x86/include/asm/pgtable_64.h --- linux-2.6.39.1/arch/x86/include/asm/pgtable_64.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/pgtable_64.h 2011-05-22 19:36:30.000000000 -0400 @@ -16,10 +16,13 @@ extern pud_t level3_kernel_pgt[512]; extern 
pud_t level3_ident_pgt[512]; +extern pud_t level3_vmalloc_pgt[512]; +extern pud_t level3_vmemmap_pgt[512]; +extern pud_t level2_vmemmap_pgt[512]; extern pmd_t level2_kernel_pgt[512]; extern pmd_t level2_fixmap_pgt[512]; -extern pmd_t level2_ident_pgt[512]; -extern pgd_t init_level4_pgt[]; +extern pmd_t level2_ident_pgt[512*2]; +extern pgd_t init_level4_pgt[512]; #define swapper_pg_dir init_level4_pgt @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) { + pax_open_kernel(); *pmdp = pmd; + pax_close_kernel(); } static inline void native_pmd_clear(pmd_t *pmd) @@ -107,7 +112,9 @@ static inline void native_pud_clear(pud_ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) { + pax_open_kernel(); *pgdp = pgd; + pax_close_kernel(); } static inline void native_pgd_clear(pgd_t *pgd) diff -urNp linux-2.6.39.1/arch/x86/include/asm/pgtable_64_types.h linux-2.6.39.1/arch/x86/include/asm/pgtable_64_types.h --- linux-2.6.39.1/arch/x86/include/asm/pgtable_64_types.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/pgtable_64_types.h 2011-05-22 19:36:30.000000000 -0400 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t; #define MODULES_VADDR _AC(0xffffffffa0000000, UL) #define MODULES_END _AC(0xffffffffff000000, UL) #define MODULES_LEN (MODULES_END - MODULES_VADDR) +#define MODULES_EXEC_VADDR MODULES_VADDR +#define MODULES_EXEC_END MODULES_END + +#define ktla_ktva(addr) (addr) +#define ktva_ktla(addr) (addr) #endif /* _ASM_X86_PGTABLE_64_DEFS_H */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/pgtable.h linux-2.6.39.1/arch/x86/include/asm/pgtable.h --- linux-2.6.39.1/arch/x86/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/pgtable.h 2011-05-22 19:36:30.000000000 -0400 @@ -81,12 +81,51 @@ extern struct mm_struct *pgd_page_get_mm #define arch_end_context_switch(prev) do {} while(0) +#define pax_open_kernel() native_pax_open_kernel() +#define pax_close_kernel() native_pax_close_kernel() #endif /* CONFIG_PARAVIRT */ +#define __HAVE_ARCH_PAX_OPEN_KERNEL +#define __HAVE_ARCH_PAX_CLOSE_KERNEL + +#ifdef CONFIG_PAX_KERNEXEC +static inline unsigned long native_pax_open_kernel(void) +{ + unsigned long cr0; + + preempt_disable(); + barrier(); + cr0 = read_cr0() ^ X86_CR0_WP; + BUG_ON(unlikely(cr0 & X86_CR0_WP)); + write_cr0(cr0); + return cr0 ^ X86_CR0_WP; +} + +static inline unsigned long native_pax_close_kernel(void) +{ + unsigned long cr0; + + cr0 = read_cr0() ^ X86_CR0_WP; + BUG_ON(unlikely(!(cr0 & X86_CR0_WP))); + write_cr0(cr0); + barrier(); + preempt_enable_no_resched(); + return cr0 ^ X86_CR0_WP; +} +#else +static inline unsigned long native_pax_open_kernel(void) { return 0; } +static inline unsigned long native_pax_close_kernel(void) { return 0; } +#endif + /* * The following only work if pte_present() is true. * Undefined behaviour if not.. 
*/ +static inline int pte_user(pte_t pte) +{ + return pte_val(pte) & _PAGE_USER; +} + static inline int pte_dirty(pte_t pte) { return pte_flags(pte) & _PAGE_DIRTY; @@ -196,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t return pte_clear_flags(pte, _PAGE_RW); } +static inline pte_t pte_mkread(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_USER); +} + static inline pte_t pte_mkexec(pte_t pte) { - return pte_clear_flags(pte, _PAGE_NX); +#ifdef CONFIG_X86_PAE + if (__supported_pte_mask & _PAGE_NX) + return pte_clear_flags(pte, _PAGE_NX); + else +#endif + return pte_set_flags(pte, _PAGE_USER); +} + +static inline pte_t pte_exprotect(pte_t pte) +{ +#ifdef CONFIG_X86_PAE + if (__supported_pte_mask & _PAGE_NX) + return pte_set_flags(pte, _PAGE_NX); + else +#endif + return pte_clear_flags(pte, _PAGE_USER); } static inline pte_t pte_mkdirty(pte_t pte) @@ -390,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long #endif #ifndef __ASSEMBLY__ + +#ifdef CONFIG_PAX_PER_CPU_PGD +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD]; +static inline pgd_t *get_cpu_pgd(unsigned int cpu) +{ + return cpu_pgd[cpu]; +} +#endif + #include static inline int pte_none(pte_t pte) @@ -560,7 +628,7 @@ static inline pud_t *pud_offset(pgd_t *p static inline int pgd_bad(pgd_t pgd) { - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE; } static inline int pgd_none(pgd_t pgd) @@ -583,7 +651,12 @@ static inline int pgd_none(pgd_t pgd) * pgd_offset() returns a (pgd_t *) * pgd_index() is used get the offset into the pgd page's array of pgd_t's; */ -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) + +#ifdef CONFIG_PAX_PER_CPU_PGD +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address)) +#endif + /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's @@ -594,6 +667,20 @@ static inline int pgd_none(pgd_t pgd) #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET) #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY) +#ifdef CONFIG_X86_32 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY +#else +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT)) + +#ifdef CONFIG_PAX_MEMORY_UDEREF +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT) +#else +#define PAX_USER_SHADOW_BASE (_AC(0,UL)) +#endif + +#endif + #ifndef __ASSEMBLY__ extern int direct_gbpages; @@ -758,11 +845,23 @@ static inline void pmdp_set_wrprotect(st * dst and src can be on the same page, but the range must not overlap, * and must not cross a page boundary. 
*/ -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count) { - memcpy(dst, src, count * sizeof(pgd_t)); + pax_open_kernel(); + while (count--) + *dst++ = *src++; + pax_close_kernel(); } +#ifdef CONFIG_PAX_PER_CPU_PGD +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count); +#endif + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count); +#else +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {} +#endif #include #endif /* __ASSEMBLY__ */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/pgtable_types.h linux-2.6.39.1/arch/x86/include/asm/pgtable_types.h --- linux-2.6.39.1/arch/x86/include/asm/pgtable_types.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/pgtable_types.h 2011-05-22 19:36:30.000000000 -0400 @@ -16,13 +16,12 @@ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ #define _PAGE_BIT_PAT 7 /* on 4KB pages */ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */ #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */ #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */ +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ /* If _PAGE_BIT_PRESENT is clear, we use these: */ @@ -40,7 +39,6 @@ #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP) #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) @@ -57,8 +55,10 @@ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) -#else +#elif defined(CONFIG_KMEMCHECK) #define _PAGE_NX (_AT(pteval_t, 0)) +#else +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) #endif #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) @@ -96,6 +96,9 @@ #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ _PAGE_ACCESSED) +#define PAGE_READONLY_NOEXEC PAGE_READONLY +#define PAGE_SHARED_NOEXEC PAGE_SHARED + #define __PAGE_KERNEL_EXEC \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL) #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) @@ -106,8 +109,8 @@ #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT) +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER) +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER) #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) #define 
__PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE) #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) @@ -166,8 +169,8 @@ * bits are combined, this will alow user to access the high address mapped * VDSO in the presence of CONFIG_COMPAT_VDSO */ -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */ +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ #endif @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p { return native_pgd_val(pgd) & PTE_FLAGS_MASK; } +#endif +#if PAGETABLE_LEVELS == 3 +#include +#endif + +#if PAGETABLE_LEVELS == 2 +#include +#endif + +#ifndef __ASSEMBLY__ #if PAGETABLE_LEVELS > 3 typedef struct { pudval_t pud; } pud_t; @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu return pud.pud; } #else -#include - static inline pudval_t native_pud_val(pud_t pud) { return native_pgd_val(pud.pgd); @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm return pmd.pmd; } #else -#include - static inline pmdval_t native_pmd_val(pmd_t pmd) { return native_pgd_val(pmd.pud.pgd); @@ -281,7 +290,6 @@ typedef struct page *pgtable_t; extern pteval_t __supported_pte_mask; extern void set_nx(void); -extern int nx_enabled; #define pgprot_writecombine pgprot_writecombine extern pgprot_t pgprot_writecombine(pgprot_t prot); diff -urNp linux-2.6.39.1/arch/x86/include/asm/processor.h linux-2.6.39.1/arch/x86/include/asm/processor.h --- linux-2.6.39.1/arch/x86/include/asm/processor.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/processor.h 2011-05-22 19:36:30.000000000 -0400 @@ -266,7 +266,7 @@ struct tss_struct { } ____cacheline_aligned; -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss); +extern struct tss_struct init_tss[NR_CPUS]; /* * Save the original ist values for checking stack pointers during debugging @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co */ #define TASK_SIZE PAGE_OFFSET #define TASK_SIZE_MAX TASK_SIZE + +#ifdef CONFIG_PAX_SEGMEXEC +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2) +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE) +#else #define STACK_TOP TASK_SIZE -#define STACK_TOP_MAX STACK_TOP +#endif + +#define STACK_TOP_MAX TASK_SIZE #define INIT_THREAD { \ - .sp0 = sizeof(init_stack) + (long)&init_stack, \ + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \ .vm86_info = NULL, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co */ #define INIT_TSS { \ .x86_tss = { \ - .sp0 = sizeof(init_stack) + (long)&init_stack, \ + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \ .ss0 = __KERNEL_DS, \ .ss1 = __KERNEL_CS, \ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co extern unsigned long thread_saved_pc(struct task_struct *tsk); #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) -#define KSTK_TOP(info) \ -({ \ - unsigned long *__ptr = (unsigned long *)(info); \ - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ -}) +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0) /* * The below -8 is to reserve 8 bytes on top of the ring0 stack. 
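For concreteness, the constants introduced in the pgtable.h and processor.h hunks above reduce to short arithmetic: SEGMEXEC halves the 32-bit task size, and on x86_64 the UDEREF shadow base and the number of user PGD slots fall out of TASK_SIZE_MAX_SHIFT. A minimal sketch that only prints the derived values; PAGE_OFFSET, TASK_SIZE_MAX_SHIFT and PGDIR_SHIFT are assumed here (0xC0000000, 47 and 39 are just the common configurations, the real kernel takes them from its own config):

#include <stdio.h>

#define PAGE_OFFSET          0xC0000000UL   /* assumed i386 3G/1G split */
#define TASK_SIZE_32         PAGE_OFFSET
#define SEGMEXEC_TASK_SIZE   (TASK_SIZE_32 / 2)

#define TASK_SIZE_MAX_SHIFT  47              /* assumed x86_64 value */
#define PGDIR_SHIFT          39
#define USER_PGD_PTRS        (1ULL << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
#define PAX_USER_SHADOW_BASE (1ULL << TASK_SIZE_MAX_SHIFT)

int main(void)
{
    printf("SEGMEXEC_TASK_SIZE   = %#lx (%lu MiB)\n",
           SEGMEXEC_TASK_SIZE, SEGMEXEC_TASK_SIZE >> 20);
    printf("USER_PGD_PTRS        = %llu\n", USER_PGD_PTRS);
    printf("PAX_USER_SHADOW_BASE = %#llx\n", PAX_USER_SHADOW_BASE);
    return 0;
}

With those assumed values the user half of the address space occupies 256 PGD entries and the shadow copy of a user pointer starts at 0x800000000000, which is why the hunks above simply add PAX_USER_SHADOW_BASE to a pointer before dereferencing it on the kernel's behalf.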
@@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str #define task_pt_regs(task) \ ({ \ struct pt_regs *__regs__; \ - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ + __regs__ = (struct pt_regs *)((task)->thread.sp0); \ __regs__ - 1; \ }) @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str /* * User space process size. 47bits minus one guard page. */ -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE) +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ - 0xc0000000 : 0xFFFFe000) + 0xc0000000 : 0xFFFFf000) #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ IA32_PAGE_OFFSET : TASK_SIZE_MAX) @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str #define STACK_TOP_MAX TASK_SIZE_MAX #define INIT_THREAD { \ - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \ } #define INIT_TSS { \ - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \ + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \ } /* @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs */ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) +#ifdef CONFIG_PAX_SEGMEXEC +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3)) +#endif + #define KSTK_EIP(task) (task_pt_regs(task)->ip) /* Get/set a process' ability to use the timestamp counter instruction */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/ptrace.h linux-2.6.39.1/arch/x86/include/asm/ptrace.h --- linux-2.6.39.1/arch/x86/include/asm/ptrace.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/ptrace.h 2011-05-22 19:36:30.000000000 -0400 @@ -152,28 +152,29 @@ static inline unsigned long regs_return_ } /* - * user_mode_vm(regs) determines whether a register set came from user mode. + * user_mode(regs) determines whether a register set came from user mode. * This is true if V8086 mode was enabled OR if the register set was from * protected mode with RPL-3 CS value. This tricky test checks that with * one comparison. Many places in the kernel can bypass this full check - * if they have already ruled out V8086 mode, so user_mode(regs) can be used. + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can + * be used. 
*/ -static inline int user_mode(struct pt_regs *regs) +static inline int user_mode_novm(struct pt_regs *regs) { #ifdef CONFIG_X86_32 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; #else - return !!(regs->cs & 3); + return !!(regs->cs & SEGMENT_RPL_MASK); #endif } -static inline int user_mode_vm(struct pt_regs *regs) +static inline int user_mode(struct pt_regs *regs) { #ifdef CONFIG_X86_32 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL; #else - return user_mode(regs); + return user_mode_novm(regs); #endif } diff -urNp linux-2.6.39.1/arch/x86/include/asm/reboot.h linux-2.6.39.1/arch/x86/include/asm/reboot.h --- linux-2.6.39.1/arch/x86/include/asm/reboot.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/reboot.h 2011-05-22 19:36:30.000000000 -0400 @@ -6,19 +6,19 @@ struct pt_regs; struct machine_ops { - void (*restart)(char *cmd); - void (*halt)(void); - void (*power_off)(void); + void (* __noreturn restart)(char *cmd); + void (* __noreturn halt)(void); + void (* __noreturn power_off)(void); void (*shutdown)(void); void (*crash_shutdown)(struct pt_regs *); - void (*emergency_restart)(void); + void (* __noreturn emergency_restart)(void); }; extern struct machine_ops machine_ops; void native_machine_crash_shutdown(struct pt_regs *regs); void native_machine_shutdown(void); -void machine_real_restart(unsigned int type); +void machine_real_restart(unsigned int type) __noreturn; /* These must match dispatch_table in reboot_32.S */ #define MRR_BIOS 0 #define MRR_APM 1 diff -urNp linux-2.6.39.1/arch/x86/include/asm/rwsem.h linux-2.6.39.1/arch/x86/include/asm/rwsem.h --- linux-2.6.39.1/arch/x86/include/asm/rwsem.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/rwsem.h 2011-05-22 19:36:30.000000000 -0400 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw { asm volatile("# beginning down_read\n\t" LOCK_PREFIX _ASM_INC "(%1)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX _ASM_DEC "(%1)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + /* adds 0x00000001 */ " jns 1f\n" " call call_rwsem_down_read_failed\n" @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st "1:\n\t" " mov %1,%2\n\t" " add %3,%2\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "sub %3,%2\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + " jle 2f\n\t" LOCK_PREFIX " cmpxchg %2,%0\n\t" " jnz 1b\n\t" @@ -104,6 +120,14 @@ static inline void __down_write_nested(s long tmp; asm volatile("# beginning down_write\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "mov %1,(%2)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + /* adds 0xffff0001, returns the old value */ " test %1,%1\n\t" /* was the count 0 before? 
*/ @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s long tmp; asm volatile("# beginning __up_read\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "mov %1,(%2)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + /* subtracts 1, returns the old value */ " jns 1f\n\t" " call call_rwsem_wake\n" /* expects old value in %edx */ @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_ long tmp; asm volatile("# beginning __up_write\n\t" LOCK_PREFIX " xadd %1,(%2)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "mov %1,(%2)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + /* subtracts 0xffff0001, returns the old value */ " jns 1f\n\t" " call call_rwsem_wake\n" /* expects old value in %edx */ @@ -176,6 +216,14 @@ static inline void __downgrade_write(str { asm volatile("# beginning __downgrade_write\n\t" LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX _ASM_SUB "%2,(%1)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + /* * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) @@ -194,7 +242,15 @@ static inline void __downgrade_write(str */ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) { - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX _ASM_SUB "%1,%0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+m" (sem->count) : "er" (delta)); } @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l { long tmp = delta; - asm volatile(LOCK_PREFIX "xadd %0,%1" + asm volatile(LOCK_PREFIX "xadd %0,%1\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + "mov %0,%1\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+r" (tmp), "+m" (sem->count) : : "memory"); diff -urNp linux-2.6.39.1/arch/x86/include/asm/segment.h linux-2.6.39.1/arch/x86/include/asm/segment.h --- linux-2.6.39.1/arch/x86/include/asm/segment.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/segment.h 2011-05-22 19:36:30.000000000 -0400 @@ -64,8 +64,8 @@ * 26 - ESPFIX small SS * 27 - per-cpu [ offset to per-cpu data area ] * 28 - stack_canary-20 [ for stack protector ] - * 29 - unused - * 30 - unused + * 29 - PCI BIOS CS + * 30 - PCI BIOS DS * 31 - TSS for double fault handler */ #define GDT_ENTRY_TLS_MIN 6 @@ -79,6 +79,8 @@ #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0) +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4) + #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1) #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4) @@ -104,6 +106,12 @@ #define __KERNEL_STACK_CANARY 0 #endif +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17) +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8) + +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18) +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8) + #define GDT_ENTRY_DOUBLEFAULT_TSS 31 /* @@ -141,7 +149,7 @@ */ /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16) #else @@ -165,6 +173,8 @@ #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3) #define __USER32_DS __USER_DS +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7 + #define GDT_ENTRY_TSS 8 /* needs two entries */ #define GDT_ENTRY_LDT 10 /* needs two entries */ #define GDT_ENTRY_TLS_MIN 12 @@ -185,6 +195,7 @@ #endif #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8) +#define 
__KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8) #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8) #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3) #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3) diff -urNp linux-2.6.39.1/arch/x86/include/asm/smp.h linux-2.6.39.1/arch/x86/include/asm/smp.h --- linux-2.6.39.1/arch/x86/include/asm/smp.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/smp.h 2011-05-22 19:36:30.000000000 -0400 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_ /* cpus sharing the last level cache: */ DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map); DECLARE_PER_CPU(u16, cpu_llc_id); -DECLARE_PER_CPU(int, cpu_number); +DECLARE_PER_CPU(unsigned int, cpu_number); static inline struct cpumask *cpu_sibling_mask(int cpu) { @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd extern int safe_smp_processor_id(void); #elif defined(CONFIG_X86_64_SMP) -#define raw_smp_processor_id() (percpu_read(cpu_number)) - -#define stack_smp_processor_id() \ -({ \ - struct thread_info *ti; \ - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \ - ti->cpu; \ -}) +#define raw_smp_processor_id() (percpu_read(cpu_number)) +#define stack_smp_processor_id() raw_smp_processor_id() #define safe_smp_processor_id() smp_processor_id() #endif diff -urNp linux-2.6.39.1/arch/x86/include/asm/spinlock.h linux-2.6.39.1/arch/x86/include/asm/spinlock.h --- linux-2.6.39.1/arch/x86/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/spinlock.h 2011-05-22 19:36:30.000000000 -0400 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar static inline void arch_read_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX " addl $1,(%0)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + "jns 1f\n" "call __read_lock_failed\n\t" "1:\n" @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r static inline void arch_write_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX " addl %1,(%0)\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + "jz 1f\n" "call __write_lock_failed\n\t" "1:\n" @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc static inline void arch_read_unlock(arch_rwlock_t *rw) { - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); + asm volatile(LOCK_PREFIX "incl %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "decl %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + + :"+m" (rw->lock) : : "memory"); } static inline void arch_write_unlock(arch_rwlock_t *rw) { - asm volatile(LOCK_PREFIX "addl %1, %0" + asm volatile(LOCK_PREFIX "addl %1, %0\n" + +#ifdef CONFIG_PAX_REFCOUNT + "jno 0f\n" + LOCK_PREFIX "subl %1, %0\n" + "int $4\n0:\n" + _ASM_EXTABLE(0b, 0b) +#endif + : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); } diff -urNp linux-2.6.39.1/arch/x86/include/asm/stackprotector.h linux-2.6.39.1/arch/x86/include/asm/stackprotector.h --- linux-2.6.39.1/arch/x86/include/asm/stackprotector.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/stackprotector.h 2011-05-22 19:36:30.000000000 -0400 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se static inline void load_stack_canary_segment(void) { -#ifdef CONFIG_X86_32 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF) asm volatile ("mov %0, %%gs" : : "r" (0)); #endif } diff -urNp 
linux-2.6.39.1/arch/x86/include/asm/stacktrace.h linux-2.6.39.1/arch/x86/include/asm/stacktrace.h --- linux-2.6.39.1/arch/x86/include/asm/stacktrace.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/stacktrace.h 2011-05-22 19:36:30.000000000 -0400 @@ -11,28 +11,20 @@ extern int kstack_depth_to_print; -struct thread_info; +struct task_struct; struct stacktrace_ops; -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo, - unsigned long *stack, - unsigned long bp, - const struct stacktrace_ops *ops, - void *data, - unsigned long *end, - int *graph); - -extern unsigned long -print_context_stack(struct thread_info *tinfo, - unsigned long *stack, unsigned long bp, - const struct stacktrace_ops *ops, void *data, - unsigned long *end, int *graph); - -extern unsigned long -print_context_stack_bp(struct thread_info *tinfo, - unsigned long *stack, unsigned long bp, - const struct stacktrace_ops *ops, void *data, - unsigned long *end, int *graph); +typedef unsigned long walk_stack_t(struct task_struct *task, + void *stack_start, + unsigned long *stack, + unsigned long bp, + const struct stacktrace_ops *ops, + void *data, + unsigned long *end, + int *graph); + +extern walk_stack_t print_context_stack; +extern walk_stack_t print_context_stack_bp; /* Generic stack tracer with callbacks */ @@ -43,7 +35,7 @@ struct stacktrace_ops { void (*address)(void *data, unsigned long address, int reliable); /* On negative return stop dumping */ int (*stack)(void *data, char *name); - walk_stack_t walk_stack; + walk_stack_t *walk_stack; }; void dump_trace(struct task_struct *tsk, struct pt_regs *regs, diff -urNp linux-2.6.39.1/arch/x86/include/asm/system.h linux-2.6.39.1/arch/x86/include/asm/system.h --- linux-2.6.39.1/arch/x86/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/system.h 2011-05-22 19:36:30.000000000 -0400 @@ -129,7 +129,7 @@ do { \ "call __switch_to\n\t" \ "movq "__percpu_arg([current_task])",%%rsi\n\t" \ __switch_canary \ - "movq %P[thread_info](%%rsi),%%r8\n\t" \ + "movq "__percpu_arg([thread_info])",%%r8\n\t" \ "movq %%rax,%%rdi\n\t" \ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \ "jnz ret_from_fork\n\t" \ @@ -140,7 +140,7 @@ do { \ [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \ [ti_flags] "i" (offsetof(struct thread_info, flags)), \ [_tif_fork] "i" (_TIF_FORK), \ - [thread_info] "i" (offsetof(struct task_struct, stack)), \ + [thread_info] "m" (current_tinfo), \ [current_task] "m" (current_task) \ __switch_canary_iparam \ : "memory", "cc" __EXTRA_CLOBBER) @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un { unsigned long __limit; asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); - return __limit + 1; + return __limit; } static inline void native_clts(void) @@ -340,12 +340,12 @@ void enable_hlt(void); void cpu_idle_wait(void); -extern unsigned long arch_align_stack(unsigned long sp); +#define arch_align_stack(x) ((x) & ~0xfUL) extern void free_init_pages(char *what, unsigned long begin, unsigned long end); void default_idle(void); -void stop_this_cpu(void *dummy); +void stop_this_cpu(void *dummy) __noreturn; /* * Force strict CPU ordering. 
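The rwsem.h and spinlock.h hunks above all add the same PAX_REFCOUNT pattern to their inline asm: perform the locked update, test the overflow flag with jno, and on signed overflow undo the update and execute int $4 so the counter can never silently wrap. A minimal user-space sketch of that undo-and-trap idea, with __builtin_add_overflow and abort() standing in for the overflow flag and int $4:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* add delta to *count, but refuse to let the signed value wrap */
static int refcount_add_checked(int *count, int delta)
{
    int newval;

    if (__builtin_add_overflow(*count, delta, &newval)) {
        /* leave *count untouched, mimicking the "undo, then int $4" path */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    *count = newval;
    return newval;
}

int main(void)
{
    int count = INT_MAX - 1;

    printf("%d\n", refcount_add_checked(&count, 1)); /* ok: reaches INT_MAX */
    refcount_add_checked(&count, 1);                 /* overflows -> abort  */
    return 0;
}

The asm variants undo either with a compensating sub/add or by writing back the pre-xadd value, but the effect is the same: the stored count is left un-wrapped and the exception handler decides how to react.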
diff -urNp linux-2.6.39.1/arch/x86/include/asm/thread_info.h linux-2.6.39.1/arch/x86/include/asm/thread_info.h --- linux-2.6.39.1/arch/x86/include/asm/thread_info.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/thread_info.h 2011-05-22 19:36:30.000000000 -0400 @@ -10,6 +10,7 @@ #include #include #include +#include /* * low level task data that entry.S needs immediate access to @@ -24,7 +25,6 @@ struct exec_domain; #include struct thread_info { - struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ __u32 flags; /* low level flags */ __u32 status; /* thread synchronous flags */ @@ -34,18 +34,12 @@ struct thread_info { mm_segment_t addr_limit; struct restart_block restart_block; void __user *sysenter_return; -#ifdef CONFIG_X86_32 - unsigned long previous_esp; /* ESP of the previous stack in - case of nested (IRQ) stacks - */ - __u8 supervisor_stack[0]; -#endif + unsigned long lowest_stack; int uaccess_err; }; -#define INIT_THREAD_INFO(tsk) \ +#define INIT_THREAD_INFO \ { \ - .task = &tsk, \ .exec_domain = &default_exec_domain, \ .flags = 0, \ .cpu = 0, \ @@ -56,7 +50,7 @@ struct thread_info { }, \ } -#define init_thread_info (init_thread_union.thread_info) +#define init_thread_info (init_thread_union.stack) #define init_stack (init_thread_union.stack) #else /* !__ASSEMBLY__ */ @@ -170,6 +164,23 @@ struct thread_info { ret; \ }) +#ifdef __ASSEMBLY__ +/* how to get the thread information struct from ASM */ +#define GET_THREAD_INFO(reg) \ + mov PER_CPU_VAR(current_tinfo), reg + +/* use this one if reg already contains %esp */ +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg) +#else +/* how to get the thread information struct from C */ +DECLARE_PER_CPU(struct thread_info *, current_tinfo); + +static __always_inline struct thread_info *current_thread_info(void) +{ + return percpu_read_stable(current_tinfo); +} +#endif + #ifdef CONFIG_X86_32 #define STACK_WARN (THREAD_SIZE/8) @@ -180,35 +191,13 @@ struct thread_info { */ #ifndef __ASSEMBLY__ - /* how to get the current stack pointer from C */ register unsigned long current_stack_pointer asm("esp") __used; -/* how to get the thread information struct from C */ -static inline struct thread_info *current_thread_info(void) -{ - return (struct thread_info *) - (current_stack_pointer & ~(THREAD_SIZE - 1)); -} - -#else /* !__ASSEMBLY__ */ - -/* how to get the thread information struct from ASM */ -#define GET_THREAD_INFO(reg) \ - movl $-THREAD_SIZE, reg; \ - andl %esp, reg - -/* use this one if reg already contains %esp */ -#define GET_THREAD_INFO_WITH_ESP(reg) \ - andl $-THREAD_SIZE, reg - #endif #else /* X86_32 */ -#include -#define KERNEL_STACK_OFFSET (5*8) - /* * macros/functions for gaining access to the thread information structure * preempt_count needs to be 1 initially, until the scheduler is functional. 
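The thread_info.h hunk above takes struct thread_info off the kernel stack: instead of masking the stack pointer with THREAD_SIZE, the patched GET_THREAD_INFO and current_thread_info() read a per-CPU current_tinfo pointer. A small sketch of the mask-based lookup being removed, assuming the usual 8 KiB THREAD_SIZE and a made-up stack pointer value:

#include <stdio.h>

#define THREAD_SIZE 8192UL  /* assumed: two pages, the common x86_32 value */

int main(void)
{
    unsigned long sp = 0xc15fbe48UL;  /* hypothetical in-kernel stack pointer */
    unsigned long ti = sp & ~(THREAD_SIZE - 1);

    /* the stock kernel derives thread_info from the stack pointer this way */
    printf("sp %#lx -> thread_info assumed at %#lx\n", sp, ti);
    return 0;
}

With the structure no longer sitting at the base of the stack, a deep overrun can no longer reach the flags and addr_limit fields that used to live there, which is what the per-CPU pointer buys.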
@@ -216,21 +205,8 @@ static inline struct thread_info *curren #ifndef __ASSEMBLY__ DECLARE_PER_CPU(unsigned long, kernel_stack); -static inline struct thread_info *current_thread_info(void) -{ - struct thread_info *ti; - ti = (void *)(percpu_read_stable(kernel_stack) + - KERNEL_STACK_OFFSET - THREAD_SIZE); - return ti; -} - -#else /* !__ASSEMBLY__ */ - -/* how to get the thread information struct from ASM */ -#define GET_THREAD_INFO(reg) \ - movq PER_CPU_VAR(kernel_stack),reg ; \ - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg - +/* how to get the current stack pointer from C */ +register unsigned long current_stack_pointer asm("rsp") __used; #endif #endif /* !X86_32 */ @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void); extern void free_thread_info(struct thread_info *ti); extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define arch_task_cache_init arch_task_cache_init + +#define __HAVE_THREAD_FUNCTIONS +#define task_thread_info(task) (&(task)->tinfo) +#define task_stack_page(task) ((task)->stack) +#define setup_thread_stack(p, org) do {} while (0) +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1) + +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR +extern struct task_struct *alloc_task_struct_node(int node); +extern void free_task_struct(struct task_struct *); + #endif #endif /* _ASM_X86_THREAD_INFO_H */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/uaccess_32.h linux-2.6.39.1/arch/x86/include/asm/uaccess_32.h --- linux-2.6.39.1/arch/x86/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/uaccess_32.h 2011-05-22 19:36:30.000000000 -0400 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u static __always_inline unsigned long __must_check __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { + pax_track_stack(); + + if ((long)n < 0) + return n; + if (__builtin_constant_p(n)) { unsigned long ret; @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, return ret; } } + if (!__builtin_constant_p(n)) + check_object_size(from, n, true); return __copy_to_user_ll(to, from, n); } @@ -83,12 +90,16 @@ static __always_inline unsigned long __m __copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); + return __copy_to_user_inatomic(to, from, n); } static __always_inline unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { + if ((long)n < 0) + return n; + /* Avoid zeroing the tail if the copy fails.. 
* If 'n' is constant and 1, 2, or 4, we do still zero on a failure, * but as the zeroing behaviour is only significant when n is not @@ -138,6 +149,12 @@ static __always_inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { might_fault(); + + pax_track_stack(); + + if ((long)n < 0) + return n; + if (__builtin_constant_p(n)) { unsigned long ret; @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __ return ret; } } + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); return __copy_from_user_ll(to, from, n); } @@ -160,6 +179,10 @@ static __always_inline unsigned long __c const void __user *from, unsigned long n) { might_fault(); + + if ((long)n < 0) + return n; + if (__builtin_constant_p(n)) { unsigned long ret; @@ -182,15 +205,19 @@ static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) { - return __copy_from_user_ll_nocache_nozero(to, from, n); -} + if ((long)n < 0) + return n; -unsigned long __must_check copy_to_user(void __user *to, - const void *from, unsigned long n); -unsigned long __must_check _copy_from_user(void *to, - const void __user *from, - unsigned long n); + return __copy_from_user_ll_nocache_nozero(to, from, n); +} +extern void copy_to_user_overflow(void) +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS + __compiletime_error("copy_to_user() buffer size is not provably correct") +#else + __compiletime_warning("copy_to_user() buffer size is not provably correct") +#endif +; extern void copy_from_user_overflow(void) #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS @@ -200,17 +227,61 @@ extern void copy_from_user_overflow(void #endif ; -static inline unsigned long __must_check copy_from_user(void *to, - const void __user *from, - unsigned long n) +/** + * copy_to_user: - Copy a block of data into user space. + * @to: Destination address, in user space. + * @from: Source address, in kernel space. + * @n: Number of bytes to copy. + * + * Context: User context only. This function may sleep. + * + * Copy data from kernel space to user space. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + */ +static inline unsigned long __must_check +copy_to_user(void __user *to, const void *from, unsigned long n) +{ + int sz = __compiletime_object_size(from); + + if (unlikely(sz != -1 && sz < n)) + copy_to_user_overflow(); + else if (access_ok(VERIFY_WRITE, to, n)) + n = __copy_to_user(to, from, n); + return n; +} + +/** + * copy_from_user: - Copy a block of data from user space. + * @to: Destination address, in kernel space. + * @from: Source address, in user space. + * @n: Number of bytes to copy. + * + * Context: User context only. This function may sleep. + * + * Copy data from user space to kernel space. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + * + * If some data could not be copied, this function will pad the copied + * data to the requested size using zero bytes. 
+ */ +static inline unsigned long __must_check +copy_from_user(void *to, const void __user *from, unsigned long n) { int sz = __compiletime_object_size(to); - if (likely(sz == -1 || sz >= n)) - n = _copy_from_user(to, from, n); - else + if (unlikely(sz != -1 && sz < n)) copy_from_user_overflow(); - + else if (access_ok(VERIFY_READ, from, n)) + n = __copy_from_user(to, from, n); + else if ((long)n > 0) { + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); + memset(to, 0, n); + } return n; } diff -urNp linux-2.6.39.1/arch/x86/include/asm/uaccess_64.h linux-2.6.39.1/arch/x86/include/asm/uaccess_64.h --- linux-2.6.39.1/arch/x86/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/uaccess_64.h 2011-05-22 19:36:30.000000000 -0400 @@ -11,6 +11,9 @@ #include #include #include +#include + +#define set_fs(x) (current_thread_info()->addr_limit = (x)) /* * Copy To/From Userspace @@ -37,26 +40,26 @@ copy_user_generic(void *to, const void * return ret; } -__must_check unsigned long -_copy_to_user(void __user *to, const void *from, unsigned len); -__must_check unsigned long -_copy_from_user(void *to, const void __user *from, unsigned len); +static __always_inline __must_check unsigned long +__copy_to_user(void __user *to, const void *from, unsigned len); +static __always_inline __must_check unsigned long +__copy_from_user(void *to, const void __user *from, unsigned len); __must_check unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len); static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, - unsigned long n) + unsigned n) { - int sz = __compiletime_object_size(to); - might_fault(); - if (likely(sz == -1 || sz >= n)) - n = _copy_from_user(to, from, n); -#ifdef CONFIG_DEBUG_VM - else - WARN(1, "Buffer overflow detected!\n"); -#endif + + if (access_ok(VERIFY_READ, from, n)) + n = __copy_from_user(to, from, n); + else if ((int)n > 0) { + if (!__builtin_constant_p(n)) + check_object_size(to, n, false); + memset(to, 0, n); + } return n; } @@ -65,110 +68,198 @@ int copy_to_user(void __user *dst, const { might_fault(); - return _copy_to_user(dst, src, size); + if (access_ok(VERIFY_WRITE, dst, size)) + size = __copy_to_user(dst, src, size); + return size; } static __always_inline __must_check -int __copy_from_user(void *dst, const void __user *src, unsigned size) +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size) { - int ret = 0; + int sz = __compiletime_object_size(dst); + unsigned ret = 0; might_fault(); - if (!__builtin_constant_p(size)) - return copy_user_generic(dst, (__force void *)src, size); + + pax_track_stack(); + + if ((int)size < 0) + return size; + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (!__access_ok(VERIFY_READ, src, size)) + return size; +#endif + + if (unlikely(sz != -1 && sz < size)) { +#ifdef CONFIG_DEBUG_VM + WARN(1, "Buffer overflow detected!\n"); +#endif + return size; + } + + if (!__builtin_constant_p(size)) { + check_object_size(dst, size, false); + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; +#endif + + return copy_user_generic(dst, (__force const void *)src, size); + } switch (size) { - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src, + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src, ret, "b", "b", "=q", 1); return ret; - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src, + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src, ret, "w", "w", 
"=r", 2); return ret; - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src, + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src, ret, "l", "k", "=r", 4); return ret; - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src, + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src, ret, "q", "", "=r", 8); return ret; case 10: - __get_user_asm(*(u64 *)dst, (u64 __user *)src, + __get_user_asm(*(u64 *)dst, (const u64 __user *)src, ret, "q", "", "=r", 10); if (unlikely(ret)) return ret; __get_user_asm(*(u16 *)(8 + (char *)dst), - (u16 __user *)(8 + (char __user *)src), + (const u16 __user *)(8 + (const char __user *)src), ret, "w", "w", "=r", 2); return ret; case 16: - __get_user_asm(*(u64 *)dst, (u64 __user *)src, + __get_user_asm(*(u64 *)dst, (const u64 __user *)src, ret, "q", "", "=r", 16); if (unlikely(ret)) return ret; __get_user_asm(*(u64 *)(8 + (char *)dst), - (u64 __user *)(8 + (char __user *)src), + (const u64 __user *)(8 + (const char __user *)src), ret, "q", "", "=r", 8); return ret; default: - return copy_user_generic(dst, (__force void *)src, size); + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; +#endif + + return copy_user_generic(dst, (__force const void *)src, size); } } static __always_inline __must_check -int __copy_to_user(void __user *dst, const void *src, unsigned size) +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size) { - int ret = 0; + int sz = __compiletime_object_size(src); + unsigned ret = 0; might_fault(); - if (!__builtin_constant_p(size)) + + pax_track_stack(); + + if ((int)size < 0) + return size; + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (!__access_ok(VERIFY_WRITE, dst, size)) + return size; +#endif + + if (unlikely(sz != -1 && sz < size)) { +#ifdef CONFIG_DEBUG_VM + WARN(1, "Buffer overflow detected!\n"); +#endif + return size; + } + + if (!__builtin_constant_p(size)) { + check_object_size(src, size, true); + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if ((unsigned long)dst < PAX_USER_SHADOW_BASE) + dst += PAX_USER_SHADOW_BASE; +#endif + return copy_user_generic((__force void *)dst, src, size); + } switch (size) { - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst, + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst, ret, "b", "b", "iq", 1); return ret; - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst, + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst, ret, "w", "w", "ir", 2); return ret; - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst, + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst, ret, "l", "k", "ir", 4); return ret; - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst, ret, "q", "", "er", 8); return ret; case 10: - __put_user_asm(*(u64 *)src, (u64 __user *)dst, + __put_user_asm(*(const u64 *)src, (u64 __user *)dst, ret, "q", "", "er", 10); if (unlikely(ret)) return ret; asm("":::"memory"); - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst, + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst, ret, "w", "w", "ir", 2); return ret; case 16: - __put_user_asm(*(u64 *)src, (u64 __user *)dst, + __put_user_asm(*(const u64 *)src, (u64 __user *)dst, ret, "q", "", "er", 16); if (unlikely(ret)) return ret; asm("":::"memory"); - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst, ret, "q", "", "er", 8); return ret; default: + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if ((unsigned long)dst < 
PAX_USER_SHADOW_BASE) + dst += PAX_USER_SHADOW_BASE; +#endif + return copy_user_generic((__force void *)dst, src, size); } } static __always_inline __must_check -int __copy_in_user(void __user *dst, const void __user *src, unsigned size) +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size) { - int ret = 0; + unsigned ret = 0; might_fault(); - if (!__builtin_constant_p(size)) + + if ((int)size < 0) + return size; + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (!__access_ok(VERIFY_READ, src, size)) + return size; + if (!__access_ok(VERIFY_WRITE, dst, size)) + return size; +#endif + + if (!__builtin_constant_p(size)) { + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; + if ((unsigned long)dst < PAX_USER_SHADOW_BASE) + dst += PAX_USER_SHADOW_BASE; +#endif + return copy_user_generic((__force void *)dst, - (__force void *)src, size); + (__force const void *)src, size); + } switch (size) { case 1: { u8 tmp; - __get_user_asm(tmp, (u8 __user *)src, + __get_user_asm(tmp, (const u8 __user *)src, ret, "b", "b", "=q", 1); if (likely(!ret)) __put_user_asm(tmp, (u8 __user *)dst, @@ -177,7 +268,7 @@ int __copy_in_user(void __user *dst, con } case 2: { u16 tmp; - __get_user_asm(tmp, (u16 __user *)src, + __get_user_asm(tmp, (const u16 __user *)src, ret, "w", "w", "=r", 2); if (likely(!ret)) __put_user_asm(tmp, (u16 __user *)dst, @@ -187,7 +278,7 @@ int __copy_in_user(void __user *dst, con case 4: { u32 tmp; - __get_user_asm(tmp, (u32 __user *)src, + __get_user_asm(tmp, (const u32 __user *)src, ret, "l", "k", "=r", 4); if (likely(!ret)) __put_user_asm(tmp, (u32 __user *)dst, @@ -196,7 +287,7 @@ int __copy_in_user(void __user *dst, con } case 8: { u64 tmp; - __get_user_asm(tmp, (u64 __user *)src, + __get_user_asm(tmp, (const u64 __user *)src, ret, "q", "", "=r", 8); if (likely(!ret)) __put_user_asm(tmp, (u64 __user *)dst, @@ -204,8 +295,16 @@ int __copy_in_user(void __user *dst, con return ret; } default: + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; + if ((unsigned long)dst < PAX_USER_SHADOW_BASE) + dst += PAX_USER_SHADOW_BASE; +#endif + return copy_user_generic((__force void *)dst, - (__force void *)src, size); + (__force const void *)src, size); } } @@ -222,33 +321,72 @@ __must_check unsigned long __clear_user( static __must_check __always_inline int __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) { + pax_track_stack(); + + if ((int)size < 0) + return size; + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (!__access_ok(VERIFY_READ, src, size)) + return size; + + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; +#endif + return copy_user_generic(dst, (__force const void *)src, size); } -static __must_check __always_inline int +static __must_check __always_inline unsigned long __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) { + if ((int)size < 0) + return size; + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (!__access_ok(VERIFY_WRITE, dst, size)) + return size; + + if ((unsigned long)dst < PAX_USER_SHADOW_BASE) + dst += PAX_USER_SHADOW_BASE; +#endif + return copy_user_generic((__force void *)dst, src, size); } -extern long __copy_user_nocache(void *dst, const void __user *src, +extern unsigned long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest); -static inline int -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size) +static 
inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size) { might_sleep(); + + if ((int)size < 0) + return size; + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (!__access_ok(VERIFY_READ, src, size)) + return size; +#endif + return __copy_user_nocache(dst, src, size, 1); } -static inline int -__copy_from_user_inatomic_nocache(void *dst, const void __user *src, +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size) { + if ((int)size < 0) + return size; + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if (!__access_ok(VERIFY_READ, src, size)) + return size; +#endif + return __copy_user_nocache(dst, src, size, 0); } -unsigned long +extern unsigned long copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); #endif /* _ASM_X86_UACCESS_64_H */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/uaccess.h linux-2.6.39.1/arch/x86/include/asm/uaccess.h --- linux-2.6.39.1/arch/x86/include/asm/uaccess.h 2011-06-03 00:04:13.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/uaccess.h 2011-06-03 00:32:04.000000000 -0400 @@ -8,12 +8,15 @@ #include #include #include +#include #include #include #define VERIFY_READ 0 #define VERIFY_WRITE 1 +extern void check_object_size(const void *ptr, unsigned long n, bool to); + /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with @@ -29,7 +32,12 @@ #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) +void __set_fs(mm_segment_t x); +void set_fs(mm_segment_t x); +#else #define set_fs(x) (current_thread_info()->addr_limit = (x)) +#endif #define segment_eq(a, b) ((a).seg == (b).seg) @@ -77,7 +85,33 @@ * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. 
*/ -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) +#define access_ok(type, addr, size) \ +({ \ + long __size = size; \ + unsigned long __addr = (unsigned long)addr; \ + unsigned long __addr_ao = __addr & PAGE_MASK; \ + unsigned long __end_ao = __addr + __size - 1; \ + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \ + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \ + while(__addr_ao <= __end_ao) { \ + char __c_ao; \ + __addr_ao += PAGE_SIZE; \ + if (__size > PAGE_SIZE) \ + cond_resched(); \ + if (__get_user(__c_ao, (char __user *)__addr)) \ + break; \ + if (type != VERIFY_WRITE) { \ + __addr = __addr_ao; \ + continue; \ + } \ + if (__put_user(__c_ao, (char __user *)__addr)) \ + break; \ + __addr = __addr_ao; \ + } \ + } \ + __ret_ao; \ +}) /* * The exception table consists of pairs of addresses: the first is the @@ -183,12 +217,20 @@ extern int __get_user_bad(void); asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") - +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define __copyuser_seg "gs;" +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n" +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n" +#else +#define __copyuser_seg +#define __COPYUSER_SET_ES +#define __COPYUSER_RESTORE_ES +#endif #ifdef CONFIG_X86_32 #define __put_user_asm_u64(x, addr, err, errret) \ - asm volatile("1: movl %%eax,0(%2)\n" \ - "2: movl %%edx,4(%2)\n" \ + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \ + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \ "3:\n" \ ".section .fixup,\"ax\"\n" \ "4: movl %3,%0\n" \ @@ -200,8 +242,8 @@ extern int __get_user_bad(void); : "A" (x), "r" (addr), "i" (errret), "0" (err)) #define __put_user_asm_ex_u64(x, addr) \ - asm volatile("1: movl %%eax,0(%1)\n" \ - "2: movl %%edx,4(%1)\n" \ + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \ + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \ "3:\n" \ _ASM_EXTABLE(1b, 2b - 1b) \ _ASM_EXTABLE(2b, 3b - 2b) \ @@ -374,7 +416,7 @@ do { \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ - asm volatile("1: mov"itype" %2,%"rtype"1\n" \ + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: mov %3,%0\n" \ @@ -382,7 +424,7 @@ do { \ " jmp 2b\n" \ ".previous\n" \ _ASM_EXTABLE(1b, 3b) \ - : "=r" (err), ltype(x) \ + : "=r" (err), ltype (x) \ : "m" (__m(addr)), "i" (errret), "0" (err)) #define __get_user_size_ex(x, ptr, size) \ @@ -407,7 +449,7 @@ do { \ } while (0) #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ - asm volatile("1: mov"itype" %1,%"rtype"0\n" \ + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\ "2:\n" \ _ASM_EXTABLE(1b, 2b - 1b) \ : ltype(x) : "m" (__m(addr))) @@ -424,13 +466,24 @@ do { \ int __gu_err; \ unsigned long __gu_val; \ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + (x) = (__typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) /* FIXME: this hack is definitely wrong -AK */ struct __large_struct { unsigned long buf[100]; }; -#define __m(x) (*(struct __large_struct __user *)(x)) +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define ____m(x) \ +({ \ + unsigned long ____x = (unsigned long)(x); \ + if (____x < PAX_USER_SHADOW_BASE) \ + ____x += PAX_USER_SHADOW_BASE; \ + (void __user *)____x; \ +}) +#else +#define ____m(x) (x) +#endif +#define 
__m(x) (*(struct __large_struct __user *)____m(x)) /* * Tell gcc we read from memory instead of writing: this is because @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu * aliasing issues. */ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ - asm volatile("1: mov"itype" %"rtype"1,%2\n" \ + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: mov %3,%0\n" \ @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu ".previous\n" \ _ASM_EXTABLE(1b, 3b) \ : "=r"(err) \ - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err)) #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ - asm volatile("1: mov"itype" %"rtype"0,%1\n" \ + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\ "2:\n" \ _ASM_EXTABLE(1b, 2b - 1b) \ : : ltype(x), "m" (__m(addr))) @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu * On error, the variable @x is set to zero. */ +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define __get_user(x, ptr) get_user((x), (ptr)) +#else #define __get_user(x, ptr) \ __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +#endif /** * __put_user: - Write a simple value into user space, with less checking. @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu * Returns zero on success, or -EFAULT on error. */ +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define __put_user(x, ptr) put_user((x), (ptr)) +#else #define __put_user(x, ptr) \ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#endif #define __get_user_unaligned __get_user #define __put_user_unaligned __put_user @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu #define get_user_ex(x, ptr) do { \ unsigned long __gue_val; \ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ - (x) = (__force __typeof__(*(ptr)))__gue_val; \ + (x) = (__typeof__(*(ptr)))__gue_val; \ } while (0) #ifdef CONFIG_X86_WP_WORKS_OK @@ -567,6 +628,7 @@ extern struct movsl_mask { #define ARCH_HAS_NOCACHE_UACCESS 1 +#define ARCH_HAS_SORT_EXTABLE #ifdef CONFIG_X86_32 # include "uaccess_32.h" #else diff -urNp linux-2.6.39.1/arch/x86/include/asm/vgtod.h linux-2.6.39.1/arch/x86/include/asm/vgtod.h --- linux-2.6.39.1/arch/x86/include/asm/vgtod.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/vgtod.h 2011-05-22 19:36:30.000000000 -0400 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data { int sysctl_enabled; struct timezone sys_tz; struct { /* extract of a clocksource struct */ + char name[8]; cycle_t (*vread)(void); cycle_t cycle_last; cycle_t mask; diff -urNp linux-2.6.39.1/arch/x86/include/asm/vsyscall.h linux-2.6.39.1/arch/x86/include/asm/vsyscall.h --- linux-2.6.39.1/arch/x86/include/asm/vsyscall.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/vsyscall.h 2011-05-22 19:36:30.000000000 -0400 @@ -15,9 +15,10 @@ enum vsyscall_num { #ifdef __KERNEL__ #include +#include +#include #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16))) -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16))) /* Definitions for CONFIG_GENERIC_TIME definitions */ #define __section_vsyscall_gtod_data __attribute__ \ @@ -31,7 +32,6 @@ enum vsyscall_num { #define VGETCPU_LSL 2 extern int __vgetcpu_mode; -extern volatile unsigned long __jiffies; /* kernel space (writeable) */ extern int vgetcpu_mode; @@ -39,6 +39,9 @@ extern struct 
timezone sys_tz; extern void map_vsyscall(void); +extern int vgettimeofday(struct timeval * tv, struct timezone * tz); +extern time_t vtime(time_t *t); +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache); #endif /* __KERNEL__ */ #endif /* _ASM_X86_VSYSCALL_H */ diff -urNp linux-2.6.39.1/arch/x86/include/asm/xen/pci.h linux-2.6.39.1/arch/x86/include/asm/xen/pci.h --- linux-2.6.39.1/arch/x86/include/asm/xen/pci.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/xen/pci.h 2011-05-22 19:36:30.000000000 -0400 @@ -33,7 +33,7 @@ struct xen_pci_frontend_ops { void (*disable_msix)(struct pci_dev *dev); }; -extern struct xen_pci_frontend_ops *xen_pci_frontend; +extern const struct xen_pci_frontend_ops *xen_pci_frontend; static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev, int vectors[]) diff -urNp linux-2.6.39.1/arch/x86/include/asm/xsave.h linux-2.6.39.1/arch/x86/include/asm/xsave.h --- linux-2.6.39.1/arch/x86/include/asm/xsave.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/include/asm/xsave.h 2011-05-22 19:36:30.000000000 -0400 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav { int err; +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if ((unsigned long)buf < PAX_USER_SHADOW_BASE) + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE); +#endif + /* * Clear the xsave header first, so that reserved fields are * initialized to zero. @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x u32 lmask = mask; u32 hmask = mask >> 32; +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE) + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE); +#endif + __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" "2:\n" ".section .fixup,\"ax\"\n" diff -urNp linux-2.6.39.1/arch/x86/Kconfig linux-2.6.39.1/arch/x86/Kconfig --- linux-2.6.39.1/arch/x86/Kconfig 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/Kconfig 2011-05-22 19:41:32.000000000 -0400 @@ -224,7 +224,7 @@ config X86_HT config X86_32_LAZY_GS def_bool y - depends on X86_32 && !CC_STACKPROTECTOR + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF config ARCH_HWEIGHT_CFLAGS string @@ -1022,7 +1022,7 @@ choice config NOHIGHMEM bool "off" - depends on !X86_NUMAQ + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) ---help--- Linux can use up to 64 Gigabytes of physical memory on x86 systems. However, the address space of 32-bit x86 processors is only 4 @@ -1059,7 +1059,7 @@ config NOHIGHMEM config HIGHMEM4G bool "4GB" - depends on !X86_NUMAQ + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) ---help--- Select this if you have a 32-bit processor and between 1 and 4 gigabytes of physical RAM. @@ -1113,7 +1113,7 @@ config PAGE_OFFSET hex default 0xB0000000 if VMSPLIT_3G_OPT default 0x80000000 if VMSPLIT_2G - default 0x78000000 if VMSPLIT_2G_OPT + default 0x70000000 if VMSPLIT_2G_OPT default 0x40000000 if VMSPLIT_1G default 0xC0000000 depends on X86_32 @@ -1457,7 +1457,7 @@ config ARCH_USES_PG_UNCACHED config EFI bool "EFI runtime service support" - depends on ACPI + depends on ACPI && !PAX_KERNEXEC ---help--- This enables the kernel to use EFI runtime services that are available (such as the EFI variable services). 
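The uaccess_32.h and uaccess_64.h hunks above converge on one hardened copy flow: reject sizes that have gone negative, verify the user range, and zero the destination rather than leave stale kernel bytes behind when the copy is refused (the compile-time object-size and UDEREF shadowing checks are omitted here). A self-contained model of that flow, with range_ok() and its 8-byte limit as stand-ins for access_ok() and the real checks:

#include <stdio.h>
#include <string.h>

/* stand-in for access_ok(); pretend only the first 8 bytes are accessible */
static int range_ok(const void *uaddr, unsigned long n)
{
    (void)uaddr;
    return n <= 8;
}

/* models the hardened copy_from_user() above: returns bytes NOT copied */
static unsigned long copy_from_user_model(void *to, const void *from, long n)
{
    if (n < 0)
        return n;              /* the (long)n < 0 check: reject wrapped sizes */
    if (range_ok(from, n)) {
        memcpy(to, from, n);   /* stands in for __copy_from_user() */
        return 0;
    }
    memset(to, 0, n);          /* refused: zero instead of leaking stale data */
    return n;
}

int main(void)
{
    char src[16] = "user data", dst[16];

    printf("ok case:  %lu bytes left\n", copy_from_user_model(dst, src, 8));
    printf("bad case: %lu bytes left\n", copy_from_user_model(dst, src, 16));
    return 0;
}

The ordering matters: the size sanity check comes before any access so a negative length can never reach memcpy, and the zeroing on the failure path keeps a refused copy from turning into an information leak.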
@@ -1487,6 +1487,7 @@ config SECCOMP config CC_STACKPROTECTOR bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" + depends on X86_64 || !PAX_MEMORY_UDEREF ---help--- This option turns on the -fstack-protector GCC feature. This feature puts, at the beginning of functions, a canary value on @@ -1544,6 +1545,7 @@ config KEXEC_JUMP config PHYSICAL_START hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP) default "0x1000000" + range 0x400000 0x40000000 ---help--- This gives the physical address where the kernel is loaded. @@ -1607,6 +1609,7 @@ config X86_NEED_RELOCS config PHYSICAL_ALIGN hex "Alignment value to which kernel should be aligned" if X86_32 default "0x1000000" + range 0x400000 0x1000000 if PAX_KERNEXEC range 0x2000 0x1000000 ---help--- This value puts the alignment restrictions on physical address @@ -1638,9 +1641,10 @@ config HOTPLUG_CPU Say N if you want to disable CPU hotplug. config COMPAT_VDSO - def_bool y + def_bool n prompt "Compat VDSO support" depends on X86_32 || IA32_EMULATION + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF ---help--- Map the 32-bit VDSO to the predictable old-style address too. diff -urNp linux-2.6.39.1/arch/x86/Kconfig.cpu linux-2.6.39.1/arch/x86/Kconfig.cpu --- linux-2.6.39.1/arch/x86/Kconfig.cpu 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/Kconfig.cpu 2011-05-22 19:36:30.000000000 -0400 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE config X86_F00F_BUG def_bool y - depends on M586MMX || M586TSC || M586 || M486 || M386 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC config X86_INVD_BUG def_bool y @@ -358,7 +358,7 @@ config X86_POPAD_OK config X86_ALIGNMENT_16 def_bool y - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 config X86_INTEL_USERCOPY def_bool y @@ -404,7 +404,7 @@ config X86_CMPXCHG64 # generates cmov. config X86_CMOV def_bool y - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) config X86_MINIMUM_CPU_FAMILY int diff -urNp linux-2.6.39.1/arch/x86/Kconfig.debug linux-2.6.39.1/arch/x86/Kconfig.debug --- linux-2.6.39.1/arch/x86/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/Kconfig.debug 2011-05-22 19:36:30.000000000 -0400 @@ -101,7 +101,7 @@ config X86_PTDUMP config DEBUG_RODATA bool "Write protect kernel read-only data structures" default y - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && BROKEN ---help--- Mark the kernel read-only data as write-protected in the pagetables, in order to catch accidental (and incorrect) writes to such const @@ -119,7 +119,7 @@ config DEBUG_RODATA_TEST config DEBUG_SET_MODULE_RONX bool "Set loadable kernel module data as NX and text as RO" - depends on MODULES + depends on MODULES && BROKEN ---help--- This option helps catch unintended modifications to loadable kernel module's text and read-only data. 
It also prevents execution diff -urNp linux-2.6.39.1/arch/x86/kernel/acpi/sleep.c linux-2.6.39.1/arch/x86/kernel/acpi/sleep.c --- linux-2.6.39.1/arch/x86/kernel/acpi/sleep.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/acpi/sleep.c 2011-05-22 19:36:30.000000000 -0400 @@ -88,8 +88,12 @@ int acpi_suspend_lowlevel(void) header->trampoline_segment = trampoline_address() >> 4; #ifdef CONFIG_SMP stack_start = (unsigned long)temp_stack + sizeof(temp_stack); + + pax_open_kernel(); early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(smp_processor_id()); + pax_close_kernel(); + initial_gs = per_cpu_offset(smp_processor_id()); #endif initial_code = (unsigned long)wakeup_long64; diff -urNp linux-2.6.39.1/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.39.1/arch/x86/kernel/acpi/wakeup_32.S --- linux-2.6.39.1/arch/x86/kernel/acpi/wakeup_32.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/acpi/wakeup_32.S 2011-05-22 19:36:30.000000000 -0400 @@ -30,13 +30,11 @@ wakeup_pmode_return: # and restore the stack ... but you need gdt for this to work movl saved_context_esp, %esp - movl %cs:saved_magic, %eax - cmpl $0x12345678, %eax + cmpl $0x12345678, saved_magic jne bogus_magic # jump to place where we left off - movl saved_eip, %eax - jmp *%eax + jmp *(saved_eip) bogus_magic: jmp bogus_magic diff -urNp linux-2.6.39.1/arch/x86/kernel/alternative.c linux-2.6.39.1/arch/x86/kernel/alternative.c --- linux-2.6.39.1/arch/x86/kernel/alternative.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/alternative.c 2011-05-22 19:36:30.000000000 -0400 @@ -248,7 +248,7 @@ static void alternatives_smp_lock(const if (!*poff || ptr < text || ptr >= text_end) continue; /* turn DS segment override prefix into lock prefix */ - if (*ptr == 0x3e) + if (*ktla_ktva(ptr) == 0x3e) text_poke(ptr, ((unsigned char []){0xf0}), 1); }; mutex_unlock(&text_mutex); @@ -269,7 +269,7 @@ static void alternatives_smp_unlock(cons if (!*poff || ptr < text || ptr >= text_end) continue; /* turn lock prefix into DS segment override prefix */ - if (*ptr == 0xf0) + if (*ktla_ktva(ptr) == 0xf0) text_poke(ptr, ((unsigned char []){0x3E}), 1); }; mutex_unlock(&text_mutex); @@ -438,7 +438,7 @@ void __init_or_module apply_paravirt(str BUG_ON(p->len > MAX_PATCH_LEN); /* prep the buffer with the original instructions */ - memcpy(insnbuf, p->instr, p->len); + memcpy(insnbuf, ktla_ktva(p->instr), p->len); used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf, (unsigned long)p->instr, p->len); @@ -506,7 +506,7 @@ void __init alternative_instructions(voi if (smp_alt_once) free_init_pages("SMP alternatives", (unsigned long)__smp_locks, - (unsigned long)__smp_locks_end); + PAGE_ALIGN((unsigned long)__smp_locks_end)); restart_nmi(); } @@ -523,13 +523,17 @@ void __init alternative_instructions(voi * instructions. And on the local CPU you need to be protected again NMI or MCE * handlers seeing an inconsistent instruction while you patch. */ -void *__init_or_module text_poke_early(void *addr, const void *opcode, +void *__kprobes text_poke_early(void *addr, const void *opcode, size_t len) { unsigned long flags; local_irq_save(flags); - memcpy(addr, opcode, len); + + pax_open_kernel(); + memcpy(ktla_ktva(addr), opcode, len); sync_core(); + pax_close_kernel(); + local_irq_restore(flags); /* Could also do a CLFLUSH here to speed up CPU recovery; but that causes hangs on some VIA CPUs. 
*/ @@ -551,36 +555,22 @@ void *__init_or_module text_poke_early(v */ void *__kprobes text_poke(void *addr, const void *opcode, size_t len) { - unsigned long flags; - char *vaddr; + unsigned char *vaddr = ktla_ktva(addr); struct page *pages[2]; - int i; + size_t i; if (!core_kernel_text((unsigned long)addr)) { - pages[0] = vmalloc_to_page(addr); - pages[1] = vmalloc_to_page(addr + PAGE_SIZE); + pages[0] = vmalloc_to_page(vaddr); + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE); } else { - pages[0] = virt_to_page(addr); + pages[0] = virt_to_page(vaddr); WARN_ON(!PageReserved(pages[0])); - pages[1] = virt_to_page(addr + PAGE_SIZE); + pages[1] = virt_to_page(vaddr + PAGE_SIZE); } BUG_ON(!pages[0]); - local_irq_save(flags); - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0])); - if (pages[1]) - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1])); - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0); - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); - clear_fixmap(FIX_TEXT_POKE0); - if (pages[1]) - clear_fixmap(FIX_TEXT_POKE1); - local_flush_tlb(); - sync_core(); - /* Could also do a CLFLUSH here to speed up CPU recovery; but - that causes hangs on some VIA CPUs. */ + text_poke_early(addr, opcode, len); for (i = 0; i < len; i++) - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]); - local_irq_restore(flags); + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]); return addr; } @@ -682,9 +672,9 @@ void __kprobes text_poke_smp_batch(struc #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) #ifdef CONFIG_X86_64 -unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; +unsigned char ideal_nop5[5] __read_only = { 0x66, 0x66, 0x66, 0x66, 0x90 }; #else -unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 }; +unsigned char ideal_nop5[5] __read_only = { 0x3e, 0x8d, 0x74, 0x26, 0x00 }; #endif void __init arch_init_ideal_nop5(void) diff -urNp linux-2.6.39.1/arch/x86/kernel/amd_iommu.c linux-2.6.39.1/arch/x86/kernel/amd_iommu.c --- linux-2.6.39.1/arch/x86/kernel/amd_iommu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/amd_iommu.c 2011-05-22 19:36:30.000000000 -0400 @@ -48,7 +48,7 @@ static DEFINE_SPINLOCK(iommu_pd_list_loc */ static struct protection_domain *pt_domain; -static struct iommu_ops amd_iommu_ops; +static const struct iommu_ops amd_iommu_ops; /* * general struct to manage commands send to an IOMMU @@ -2286,7 +2286,7 @@ static void prealloc_protection_domains( } } -static struct dma_map_ops amd_iommu_dma_ops = { +static const struct dma_map_ops amd_iommu_dma_ops = { .alloc_coherent = alloc_coherent, .free_coherent = free_coherent, .map_page = map_page, @@ -2582,7 +2582,7 @@ static int amd_iommu_domain_has_cap(stru return 0; } -static struct iommu_ops amd_iommu_ops = { +static const struct iommu_ops amd_iommu_ops = { .domain_init = amd_iommu_domain_init, .domain_destroy = amd_iommu_domain_destroy, .attach_dev = amd_iommu_attach_device, diff -urNp linux-2.6.39.1/arch/x86/kernel/apic/apic.c linux-2.6.39.1/arch/x86/kernel/apic/apic.c --- linux-2.6.39.1/arch/x86/kernel/apic/apic.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/apic/apic.c 2011-05-22 19:36:30.000000000 -0400 @@ -1821,7 +1821,7 @@ void smp_error_interrupt(struct pt_regs apic_write(APIC_ESR, 0); v1 = apic_read(APIC_ESR); ack_APIC_irq(); - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); /* * Here is what the APIC error bits mean: @@ -2204,6 +2204,8 @@ static int __cpuinit apic_cluster_num(vo u16 *bios_cpu_apicid; 
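The smp_error_interrupt() hunk above converts the irq_err_count statistic from atomic_inc() to atomic_inc_unchecked(), a conversion the patch repeats for counters that are allowed to wrap. The sketch below models the distinction under the PAX_REFCOUNT assumption that ordinary atomic_t increments trap on overflow; the trap is simulated with abort() and the types are simplified stand-ins, not the kernel definitions.

/* Simplified model: atomic_t increments are overflow-checked (refcount
 * protection), atomic_unchecked_t increments are free-running and may wrap,
 * which is what pure statistics counters such as irq_err_count want. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX)		/* stand-in for the overflow trap */
		abort();
	__sync_fetch_and_add(&v->counter, 1);
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);	/* wraps silently */
}

int main(void)
{
	atomic_unchecked_t irq_err_count = { INT_MAX };
	atomic_t refcount = { 1 };

	atomic_inc_unchecked(&irq_err_count);	/* legitimate wrap */
	atomic_inc(&refcount);
	printf("irq_err_count=%d refcount=%d\n",
	       irq_err_count.counter, refcount.counter);
	return 0;
}
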
DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS); + pax_track_stack(); + bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); bitmap_zero(clustermap, NUM_APIC_CLUSTERS); diff -urNp linux-2.6.39.1/arch/x86/kernel/apic/io_apic.c linux-2.6.39.1/arch/x86/kernel/apic/io_apic.c --- linux-2.6.39.1/arch/x86/kernel/apic/io_apic.c 2011-06-03 00:04:13.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/apic/io_apic.c 2011-06-03 00:42:37.000000000 -0400 @@ -623,7 +623,7 @@ struct IO_APIC_route_entry **alloc_ioapi ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, GFP_ATOMIC); if (!ioapic_entries) - return 0; + return NULL; for (apic = 0; apic < nr_ioapics; apic++) { ioapic_entries[apic] = @@ -640,7 +640,7 @@ nomem: kfree(ioapic_entries[apic]); kfree(ioapic_entries); - return 0; + return NULL; } /* @@ -1040,7 +1040,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, } EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); -void lock_vector_lock(void) +void lock_vector_lock(void) __acquires(vector_lock) { /* Used to the online set of cpus does not change * during assign_irq_vector. @@ -1048,7 +1048,7 @@ void lock_vector_lock(void) raw_spin_lock(&vector_lock); } -void unlock_vector_lock(void) +void unlock_vector_lock(void) __releases(vector_lock) { raw_spin_unlock(&vector_lock); } @@ -2379,7 +2379,7 @@ static void ack_apic_edge(struct irq_dat ack_APIC_irq(); } -atomic_t irq_mis_count; +atomic_unchecked_t irq_mis_count; /* * IO-APIC versions below 0x20 don't support EOI register. @@ -2487,7 +2487,7 @@ static void ack_apic_level(struct irq_da * at the cpu. */ if (!(v & (1 << (i & 0x1f)))) { - atomic_inc(&irq_mis_count); + atomic_inc_unchecked(&irq_mis_count); eoi_ioapic_irq(irq, cfg); } diff -urNp linux-2.6.39.1/arch/x86/kernel/apm_32.c linux-2.6.39.1/arch/x86/kernel/apm_32.c --- linux-2.6.39.1/arch/x86/kernel/apm_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/apm_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex); * This is for buggy BIOS's that refer to (real mode) segment 0x40 * even though they are called in protected mode. */ -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); static const char driver_version[] = "1.16ac"; /* no spaces */ @@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call) BUG_ON(cpu != 0); gdt = get_cpu_gdt_table(cpu); save_desc_40 = gdt[0x40 / 8]; + + pax_open_kernel(); gdt[0x40 / 8] = bad_bios_desc; + pax_close_kernel(); apm_irq_save(flags); APM_DO_SAVE_SEGS; @@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call) &call->esi); APM_DO_RESTORE_SEGS; apm_irq_restore(flags); + + pax_open_kernel(); gdt[0x40 / 8] = save_desc_40; + pax_close_kernel(); + put_cpu(); return call->eax & 0xff; @@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void BUG_ON(cpu != 0); gdt = get_cpu_gdt_table(cpu); save_desc_40 = gdt[0x40 / 8]; + + pax_open_kernel(); gdt[0x40 / 8] = bad_bios_desc; + pax_close_kernel(); apm_irq_save(flags); APM_DO_SAVE_SEGS; @@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void &call->eax); APM_DO_RESTORE_SEGS; apm_irq_restore(flags); + + pax_open_kernel(); gdt[0x40 / 8] = save_desc_40; + pax_close_kernel(); + put_cpu(); return error; } @@ -2351,12 +2365,15 @@ static int __init apm_init(void) * code to that CPU. 
*/ gdt = get_cpu_gdt_table(0); + + pax_open_kernel(); set_desc_base(&gdt[APM_CS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); set_desc_base(&gdt[APM_CS_16 >> 3], (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); set_desc_base(&gdt[APM_DS >> 3], (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); + pax_close_kernel(); proc_create("apm", 0, NULL, &apm_file_ops); diff -urNp linux-2.6.39.1/arch/x86/kernel/asm-offsets_64.c linux-2.6.39.1/arch/x86/kernel/asm-offsets_64.c --- linux-2.6.39.1/arch/x86/kernel/asm-offsets_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/asm-offsets_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -69,6 +69,7 @@ int main(void) BLANK(); #undef ENTRY + DEFINE(TSS_size, sizeof(struct tss_struct)); OFFSET(TSS_ist, tss_struct, x86_tss.ist); BLANK(); diff -urNp linux-2.6.39.1/arch/x86/kernel/asm-offsets.c linux-2.6.39.1/arch/x86/kernel/asm-offsets.c --- linux-2.6.39.1/arch/x86/kernel/asm-offsets.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/asm-offsets.c 2011-05-25 17:35:48.000000000 -0400 @@ -33,6 +33,8 @@ void common(void) { OFFSET(TI_status, thread_info, status); OFFSET(TI_addr_limit, thread_info, addr_limit); OFFSET(TI_preempt_count, thread_info, preempt_count); + OFFSET(TI_lowest_stack, thread_info, lowest_stack); + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo)); BLANK(); OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); @@ -53,8 +55,26 @@ void common(void) { OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); + +#ifdef CONFIG_PAX_KERNEXEC + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0); +#endif + +#ifdef CONFIG_PAX_MEMORY_UDEREF + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3); + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3); +#ifdef CONFIG_X86_64 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd); +#endif #endif +#endif + + BLANK(); + DEFINE(PAGE_SIZE_asm, PAGE_SIZE); + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT); + DEFINE(THREAD_SIZE_asm, THREAD_SIZE); + #ifdef CONFIG_XEN BLANK(); OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask); diff -urNp linux-2.6.39.1/arch/x86/kernel/cpu/amd.c linux-2.6.39.1/arch/x86/kernel/cpu/amd.c --- linux-2.6.39.1/arch/x86/kernel/cpu/amd.c 2011-06-03 00:04:13.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/cpu/amd.c 2011-06-03 00:32:04.000000000 -0400 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c unsigned int size) { /* AMD errata T13 (order #21922) */ - if ((c->x86 == 6)) { + if (c->x86 == 6) { /* Duron Rev A0 */ if (c->x86_model == 3 && c->x86_mask == 0) size = 64; diff -urNp linux-2.6.39.1/arch/x86/kernel/cpu/common.c linux-2.6.39.1/arch/x86/kernel/cpu/common.c --- linux-2.6.39.1/arch/x86/kernel/cpu/common.c 2011-06-03 00:04:13.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/cpu/common.c 2011-06-03 00:32:04.000000000 -0400 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { -#ifdef CONFIG_X86_64 - /* - * We need valid kernel segments for data and code in long mode too - * IRET will check the segment types kkeil 2000/10/28 - * Also sysret mandates a special GDT layout - * - * TLS descriptors are currently at a different place compared to i386. - * Hopefully nobody expects them at a fixed place (Wine?) 
- */ - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), -#else - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), - /* - * Segments used for calling PnP BIOS have byte granularity. - * They code segments and data segments have fixed 64k limits, - * the transfer segment sizes are set at run time. - */ - /* 32-bit code */ - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), - /* 16-bit code */ - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), - /* 16-bit data */ - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), - /* 16-bit data */ - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), - /* 16-bit data */ - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), - /* - * The APM segments have byte granularity and their bases - * are set at run time. All have 64k limits. - */ - /* 32-bit code */ - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), - /* 16-bit code */ - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), - /* data */ - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), - - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), - GDT_STACK_CANARY_INIT -#endif -} }; -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); - static int __init x86_xsave_setup(char *s) { setup_clear_cpu_cap(X86_FEATURE_XSAVE); @@ -352,7 +298,7 @@ void switch_to_new_gdt(int cpu) { struct desc_ptr gdt_descr; - gdt_descr.address = (long)get_cpu_gdt_table(cpu); + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); /* Reload the per-cpu base */ @@ -824,6 +770,10 @@ static void __cpuinit identify_cpu(struc /* Filter out anything that depends on CPUID levels we don't have */ filter_cpuid_features(c, true); +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32)) + setup_clear_cpu_cap(X86_FEATURE_SEP); +#endif + /* If the model name is still unset, do table lookup. 
*/ if (!c->x86_model_id[0]) { const char *p; @@ -1003,6 +953,9 @@ static __init int setup_disablecpuid(cha } __setup("clearcpuid=", setup_disablecpuid); +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo; +EXPORT_PER_CPU_SYMBOL(current_tinfo); + #ifdef CONFIG_X86_64 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table }; @@ -1018,7 +971,7 @@ DEFINE_PER_CPU(struct task_struct *, cur EXPORT_PER_CPU_SYMBOL(current_task); DEFINE_PER_CPU(unsigned long, kernel_stack) = - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; + (unsigned long)&init_thread_union - 16 + THREAD_SIZE; EXPORT_PER_CPU_SYMBOL(kernel_stack); DEFINE_PER_CPU(char *, irq_stack_ptr) = @@ -1083,7 +1036,7 @@ struct pt_regs * __cpuinit idle_regs(str { memset(regs, 0, sizeof(struct pt_regs)); regs->fs = __KERNEL_PERCPU; - regs->gs = __KERNEL_STACK_CANARY; + savesegment(gs, regs->gs); return regs; } @@ -1138,7 +1091,7 @@ void __cpuinit cpu_init(void) int i; cpu = stack_smp_processor_id(); - t = &per_cpu(init_tss, cpu); + t = init_tss + cpu; oist = &per_cpu(orig_ist, cpu); #ifdef CONFIG_NUMA @@ -1164,7 +1117,7 @@ void __cpuinit cpu_init(void) switch_to_new_gdt(cpu); loadsegment(fs, 0); - load_idt((const struct desc_ptr *)&idt_descr); + load_idt(&idt_descr); memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); syscall_init(); @@ -1173,7 +1126,6 @@ void __cpuinit cpu_init(void) wrmsrl(MSR_KERNEL_GS_BASE, 0); barrier(); - x86_configure_nx(); if (cpu != 0) enable_x2apic(); @@ -1227,7 +1179,7 @@ void __cpuinit cpu_init(void) { int cpu = smp_processor_id(); struct task_struct *curr = current; - struct tss_struct *t = &per_cpu(init_tss, cpu); + struct tss_struct *t = init_tss + cpu; struct thread_struct *thread = &curr->thread; if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { diff -urNp linux-2.6.39.1/arch/x86/kernel/cpu/intel.c linux-2.6.39.1/arch/x86/kernel/cpu/intel.c --- linux-2.6.39.1/arch/x86/kernel/cpu/intel.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/cpu/intel.c 2011-05-22 19:36:30.000000000 -0400 @@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug * Update the IDT descriptor and reload the IDT so that * it uses the read-only mapped virtual address. */ - idt_descr.address = fix_to_virt(FIX_F00F_IDT); + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT); load_idt(&idt_descr); } #endif diff -urNp linux-2.6.39.1/arch/x86/kernel/cpu/Makefile linux-2.6.39.1/arch/x86/kernel/cpu/Makefile --- linux-2.6.39.1/arch/x86/kernel/cpu/Makefile 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/cpu/Makefile 2011-05-22 19:36:30.000000000 -0400 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg CFLAGS_REMOVE_perf_event.o = -pg endif -# Make sure load_percpu_segment has no stackprotector -nostackp := $(call cc-option, -fno-stack-protector) -CFLAGS_common.o := $(nostackp) - obj-y := intel_cacheinfo.o scattered.o topology.o obj-y += proc.o capflags.o powerflags.o common.o obj-y += vmware.o hypervisor.o sched.o mshyperv.o diff -urNp linux-2.6.39.1/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.39.1/arch/x86/kernel/cpu/mcheck/mce.c --- linux-2.6.39.1/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-22 19:36:30.000000000 -0400 @@ -46,6 +46,7 @@ #include #include #include +#include #include "mce-internal.h" @@ -220,7 +221,7 @@ static void print_mce(struct mce *m) !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" 
: "", m->cs, m->ip); - if (m->cs == __KERNEL_CS) + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS) print_symbol("{%s}", m->ip); pr_cont("\n"); } @@ -244,10 +245,10 @@ static void print_mce(struct mce *m) #define PANIC_TIMEOUT 5 /* 5 seconds */ -static atomic_t mce_paniced; +static atomic_unchecked_t mce_paniced; static int fake_panic; -static atomic_t mce_fake_paniced; +static atomic_unchecked_t mce_fake_paniced; /* Panic in progress. Enable interrupts and wait for final IPI */ static void wait_for_panic(void) @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct /* * Make sure only one CPU runs in machine check panic */ - if (atomic_inc_return(&mce_paniced) > 1) + if (atomic_inc_return_unchecked(&mce_paniced) > 1) wait_for_panic(); barrier(); @@ -279,7 +280,7 @@ static void mce_panic(char *msg, struct console_verbose(); } else { /* Don't log too much for fake panic */ - if (atomic_inc_return(&mce_fake_paniced) > 1) + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1) return; } /* First print corrected ones that are still unlogged */ @@ -647,7 +648,7 @@ static int mce_timed_out(u64 *t) * might have been modified by someone else. */ rmb(); - if (atomic_read(&mce_paniced)) + if (atomic_read_unchecked(&mce_paniced)) wait_for_panic(); if (!monarch_timeout) goto out; @@ -1461,14 +1462,14 @@ void __cpuinit mcheck_cpu_init(struct cp */ static DEFINE_SPINLOCK(mce_state_lock); -static int open_count; /* #times opened */ +static local_t open_count; /* #times opened */ static int open_exclu; /* already open exclusive? */ static int mce_open(struct inode *inode, struct file *file) { spin_lock(&mce_state_lock); - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) { + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) { spin_unlock(&mce_state_lock); return -EBUSY; @@ -1476,7 +1477,7 @@ static int mce_open(struct inode *inode, if (file->f_flags & O_EXCL) open_exclu = 1; - open_count++; + local_inc(&open_count); spin_unlock(&mce_state_lock); @@ -1487,7 +1488,7 @@ static int mce_release(struct inode *ino { spin_lock(&mce_state_lock); - open_count--; + local_dec(&open_count); open_exclu = 0; spin_unlock(&mce_state_lock); @@ -2174,7 +2175,7 @@ struct dentry *mce_get_debugfs_dir(void) static void mce_reset(void) { cpu_missing = 0; - atomic_set(&mce_fake_paniced, 0); + atomic_set_unchecked(&mce_fake_paniced, 0); atomic_set(&mce_executing, 0); atomic_set(&mce_callin, 0); atomic_set(&global_nwo, 0); diff -urNp linux-2.6.39.1/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.39.1/arch/x86/kernel/cpu/mtrr/main.c --- linux-2.6.39.1/arch/x86/kernel/cpu/mtrr/main.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/cpu/mtrr/main.c 2011-05-22 19:36:30.000000000 -0400 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex); u64 size_or_mask, size_and_mask; static bool mtrr_aps_delayed_init; -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM]; +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only; const struct mtrr_ops *mtrr_if; diff -urNp linux-2.6.39.1/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.39.1/arch/x86/kernel/cpu/mtrr/mtrr.h --- linux-2.6.39.1/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-05-22 19:36:30.000000000 -0400 @@ -12,19 +12,19 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; struct mtrr_ops { - u32 vendor; - u32 use_intel_if; - void (*set)(unsigned int reg, unsigned long base, + const u32 vendor; + const u32 use_intel_if; + void (* const 
set)(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type); - void (*set_all)(void); + void (* const set_all)(void); - void (*get)(unsigned int reg, unsigned long *base, + void (* const get)(unsigned int reg, unsigned long *base, unsigned long *size, mtrr_type *type); - int (*get_free_region)(unsigned long base, unsigned long size, + int (* const get_free_region)(unsigned long base, unsigned long size, int replace_reg); - int (*validate_add_page)(unsigned long base, unsigned long size, + int (* const validate_add_page)(unsigned long base, unsigned long size, unsigned int type); - int (*have_wrcomb)(void); + int (* const have_wrcomb)(void); }; extern int generic_get_free_region(unsigned long base, unsigned long size, diff -urNp linux-2.6.39.1/arch/x86/kernel/cpu/perf_event.c linux-2.6.39.1/arch/x86/kernel/cpu/perf_event.c --- linux-2.6.39.1/arch/x86/kernel/cpu/perf_event.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/cpu/perf_event.c 2011-05-22 19:36:30.000000000 -0400 @@ -774,6 +774,8 @@ static int x86_schedule_events(struct cp int i, j, w, wmax, num = 0; struct hw_perf_event *hwc; + pax_track_stack(); + bitmap_zero(used_mask, X86_PMC_IDX_MAX); for (i = 0; i < n; i++) { @@ -1878,7 +1880,7 @@ perf_callchain_user(struct perf_callchai break; perf_callchain_store(entry, frame.return_address); - fp = frame.next_frame; + fp = (__force const void __user *)frame.next_frame; } } diff -urNp linux-2.6.39.1/arch/x86/kernel/crash.c linux-2.6.39.1/arch/x86/kernel/crash.c --- linux-2.6.39.1/arch/x86/kernel/crash.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/crash.c 2011-05-22 19:36:30.000000000 -0400 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu, regs = args->regs; #ifdef CONFIG_X86_32 - if (!user_mode_vm(regs)) { + if (!user_mode(regs)) { crash_fixup_ss_esp(&fixed_regs, regs); regs = &fixed_regs; } diff -urNp linux-2.6.39.1/arch/x86/kernel/doublefault_32.c linux-2.6.39.1/arch/x86/kernel/doublefault_32.c --- linux-2.6.39.1/arch/x86/kernel/doublefault_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/doublefault_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -11,7 +11,7 @@ #define DOUBLEFAULT_STACKSIZE (1024) static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2) #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM) @@ -21,7 +21,7 @@ static void doublefault_fn(void) unsigned long gdt, tss; store_gdt(&gdt_desc); - gdt = gdt_desc.address; + gdt = (unsigned long)gdt_desc.address; printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach /* 0x2 bit is always set */ .flags = X86_EFLAGS_SF | 0x2, .sp = STACK_START, - .es = __USER_DS, + .es = __KERNEL_DS, .cs = __KERNEL_CS, .ss = __KERNEL_DS, - .ds = __USER_DS, + .ds = __KERNEL_DS, .fs = __KERNEL_PERCPU, .__cr3 = __pa_nodebug(swapper_pg_dir), diff -urNp linux-2.6.39.1/arch/x86/kernel/dumpstack_32.c linux-2.6.39.1/arch/x86/kernel/dumpstack_32.c --- linux-2.6.39.1/arch/x86/kernel/dumpstack_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/dumpstack_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task bp = stack_frame(task, regs); for (;;) { - struct thread_info *context; + void *stack_start = (void *)((unsigned 
long)stack & ~(THREAD_SIZE-1)); - context = (struct thread_info *) - ((unsigned long)stack & (~(THREAD_SIZE - 1))); - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph); + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph); - stack = (unsigned long *)context->previous_esp; - if (!stack) + if (stack_start == task_stack_page(task)) break; + stack = *(unsigned long **)stack_start; if (ops->stack(data, "IRQ") < 0) break; touch_nmi_watchdog(); @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs * When in-kernel, we also print out the stack and code at the * time of the fault.. */ - if (!user_mode_vm(regs)) { + if (!user_mode(regs)) { unsigned int code_prologue = code_bytes * 43 / 64; unsigned int code_len = code_bytes; unsigned char c; u8 *ip; + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]); printk(KERN_EMERG "Stack:\n"); show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG); printk(KERN_EMERG "Code: "); - ip = (u8 *)regs->ip - code_prologue; + ip = (u8 *)regs->ip - code_prologue + cs_base; if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { /* try starting at IP */ - ip = (u8 *)regs->ip; + ip = (u8 *)regs->ip + cs_base; code_len = code_len - code_prologue + 1; } for (i = 0; i < code_len; i++, ip++) { @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs printk(" Bad EIP value."); break; } - if (ip == (u8 *)regs->ip) + if (ip == (u8 *)regs->ip + cs_base) printk("<%02x> ", c); else printk("%02x ", c); @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip) { unsigned short ud2; + ip = ktla_ktva(ip); if (ip < PAGE_OFFSET) return 0; if (probe_kernel_address((unsigned short *)ip, ud2)) diff -urNp linux-2.6.39.1/arch/x86/kernel/dumpstack_64.c linux-2.6.39.1/arch/x86/kernel/dumpstack_64.c --- linux-2.6.39.1/arch/x86/kernel/dumpstack_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/dumpstack_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task unsigned long *irq_stack_end = (unsigned long *)per_cpu(irq_stack_ptr, cpu); unsigned used = 0; - struct thread_info *tinfo; int graph = 0; unsigned long dummy; + void *stack_start; if (!task) task = current; @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task * current stack address.
If the stacks consist of nested * exceptions */ - tinfo = task_thread_info(task); for (;;) { char *id; unsigned long *estack_end; + estack_end = in_exception_stack(cpu, (unsigned long)stack, &used, &id); @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task if (ops->stack(data, id) < 0) break; - bp = ops->walk_stack(tinfo, stack, bp, ops, + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops, data, estack_end, &graph); ops->stack(data, ""); /* @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task if (in_irq_stack(stack, irq_stack, irq_stack_end)) { if (ops->stack(data, "IRQ") < 0) break; - bp = ops->walk_stack(tinfo, stack, bp, + bp = ops->walk_stack(task, irq_stack, stack, bp, ops, data, irq_stack_end, &graph); /* * We link to the next stack (which would be @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task /* * This handles the process stack: */ - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1)); + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph); put_cpu(); } EXPORT_SYMBOL(dump_trace); diff -urNp linux-2.6.39.1/arch/x86/kernel/dumpstack.c linux-2.6.39.1/arch/x86/kernel/dumpstack.c --- linux-2.6.39.1/arch/x86/kernel/dumpstack.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/dumpstack.c 2011-05-22 19:41:32.000000000 -0400 @@ -2,6 +2,9 @@ * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs */ +#ifdef CONFIG_GRKERNSEC_HIDESYM +#define __INCLUDED_BY_HIDESYM 1 +#endif #include #include #include @@ -35,9 +38,8 @@ void printk_address(unsigned long addres static void print_ftrace_graph_addr(unsigned long addr, void *data, const struct stacktrace_ops *ops, - struct thread_info *tinfo, int *graph) + struct task_struct *task, int *graph) { - struct task_struct *task = tinfo->task; unsigned long ret_addr; int index = task->curr_ret_stack; @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad static inline void print_ftrace_graph_addr(unsigned long addr, void *data, const struct stacktrace_ops *ops, - struct thread_info *tinfo, int *graph) + struct task_struct *task, int *graph) { } #endif @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack */ -static inline int valid_stack_ptr(struct thread_info *tinfo, - void *p, unsigned int size, void *end) +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end) { - void *t = tinfo; if (end) { if (p < end && p >= (end-THREAD_SIZE)) return 1; @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct } unsigned long -print_context_stack(struct thread_info *tinfo, +print_context_stack(struct task_struct *task, void *stack_start, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data, unsigned long *end, int *graph) { struct stack_frame *frame = (struct stack_frame *)bp; - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) { unsigned long addr; addr = *stack; @@ -102,7 +102,7 @@ print_context_stack(struct thread_info * } else { ops->address(data, addr, 0); } - print_ftrace_graph_addr(addr, data, ops, tinfo, graph); + print_ftrace_graph_addr(addr, data, ops, task, graph); } stack++; } @@ -111,7 +111,7 @@ print_context_stack(struct thread_info * EXPORT_SYMBOL_GPL(print_context_stack); unsigned long 
-print_context_stack_bp(struct thread_info *tinfo, +print_context_stack_bp(struct task_struct *task, void *stack_start, unsigned long *stack, unsigned long bp, const struct stacktrace_ops *ops, void *data, unsigned long *end, int *graph) @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf struct stack_frame *frame = (struct stack_frame *)bp; unsigned long *ret_addr = &frame->return_address; - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) { + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) { unsigned long addr = *ret_addr; if (!__kernel_text_address(addr)) @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf ops->address(data, addr, 1); frame = frame->next_frame; ret_addr = &frame->return_address; - print_ftrace_graph_addr(addr, data, ops, tinfo, graph); + print_ftrace_graph_addr(addr, data, ops, task, graph); } return (unsigned long)frame; @@ -202,7 +202,7 @@ void dump_stack(void) bp = stack_frame(current, NULL); printk("Pid: %d, comm: %.20s %s %s %.*s\n", - current->pid, current->comm, print_tainted(), + task_pid_nr(current), current->comm, print_tainted(), init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); @@ -238,6 +238,8 @@ unsigned __kprobes long oops_begin(void) } EXPORT_SYMBOL_GPL(oops_begin); +extern void gr_handle_kernel_exploit(void); + void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) { if (regs && kexec_should_crash(current)) @@ -259,7 +261,10 @@ void __kprobes oops_end(unsigned long fl panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); - do_exit(signr); + + gr_handle_kernel_exploit(); + + do_group_exit(signr); } int __kprobes __die(const char *str, struct pt_regs *regs, long err) @@ -286,7 +291,7 @@ int __kprobes __die(const char *str, str show_registers(regs); #ifdef CONFIG_X86_32 - if (user_mode_vm(regs)) { + if (user_mode(regs)) { sp = regs->sp; ss = regs->ss & 0xffff; } else { @@ -314,7 +319,7 @@ void die(const char *str, struct pt_regs unsigned long flags = oops_begin(); int sig = SIGSEGV; - if (!user_mode_vm(regs)) + if (!user_mode(regs)) report_bug(regs->ip, regs); if (__die(str, regs, err)) diff -urNp linux-2.6.39.1/arch/x86/kernel/early_printk.c linux-2.6.39.1/arch/x86/kernel/early_printk.c --- linux-2.6.39.1/arch/x86/kernel/early_printk.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/early_printk.c 2011-05-22 19:36:30.000000000 -0400 @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char int n; va_list ap; + pax_track_stack(); + va_start(ap, fmt); n = vscnprintf(buf, sizeof(buf), fmt, ap); early_console->write(early_console, buf, n); diff -urNp linux-2.6.39.1/arch/x86/kernel/entry_32.S linux-2.6.39.1/arch/x86/kernel/entry_32.S --- linux-2.6.39.1/arch/x86/kernel/entry_32.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/entry_32.S 2011-05-23 17:07:00.000000000 -0400 @@ -185,13 +185,146 @@ /*CFI_REL_OFFSET gs, PT_GS*/ .endm .macro SET_KERNEL_GS reg + +#ifdef CONFIG_CC_STACKPROTECTOR movl $(__KERNEL_STACK_CANARY), \reg +#elif defined(CONFIG_PAX_MEMORY_UDEREF) + movl $(__USER_DS), \reg +#else + xorl \reg, \reg +#endif + movl \reg, %gs .endm #endif /* CONFIG_X86_32_LAZY_GS */ -.macro SAVE_ALL +.macro pax_enter_kernel +#ifdef CONFIG_PAX_KERNEXEC + call pax_enter_kernel +#endif +.endm + +.macro pax_exit_kernel +#ifdef CONFIG_PAX_KERNEXEC + call pax_exit_kernel +#endif +.endm + 
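A bit further down in this entry_32.S hunk (and mirrored in entry_64.S) the patch adds pax_erase_kstack for CONFIG_PAX_MEMORY_STACKLEAK: before returning to userland, the portion of the kernel stack the syscall used is overwritten with a poison value so stale kernel data cannot leak through a later syscall's uninitialized stack slots. The C below is a simplified userspace model of that scan-and-fill idea, not a translation of the asm; THREAD_WORDS, RUN_LEN and the buffer are stand-ins.

/* Model of STACKLEAK-style kernel stack erasure: starting from the deepest
 * point the stack has reached, walk down until a long run of poison words
 * marks the untouched region, then re-poison everything from there up to
 * the current stack pointer. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_WORDS	2048U			/* 8 KiB stack, in 32-bit words */
#define POISON		((uint32_t)-0xBEEF)	/* same fill value as the asm */
#define RUN_LEN		32U			/* required run of poison words */

static uint32_t kstack[THREAD_WORDS];

static void erase_kstack(size_t lowest, size_t sp)
{
	size_t run = 0, i = lowest;

	/* walk toward the stack bottom until RUN_LEN consecutive poison
	 * words are seen, i.e. until we are clearly below anything used */
	while (i > 0 && run < RUN_LEN) {
		i--;
		run = (kstack[i] == POISON) ? run + 1 : 0;
	}

	/* re-poison from there up to the current stack pointer */
	for (size_t j = i; j < sp; j++)
		kstack[j] = POISON;
}

int main(void)
{
	for (size_t i = 0; i < THREAD_WORDS; i++)
		kstack[i] = POISON;
	for (size_t i = 100; i < 400; i++)	/* pretend a syscall wrote here */
		kstack[i] = (uint32_t)i;

	erase_kstack(400, 1500);
	printf("slot 250 after erase: %#x\n", kstack[250]);
	return 0;
}
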
+#ifdef CONFIG_PAX_KERNEXEC +ENTRY(pax_enter_kernel) +#ifdef CONFIG_PARAVIRT + pushl %eax + pushl %ecx + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0) + mov %eax, %esi +#else + mov %cr0, %esi +#endif + bts $16, %esi + jnc 1f + mov %cs, %esi + cmp $__KERNEL_CS, %esi + jz 3f + ljmp $__KERNEL_CS, $3f +1: ljmp $__KERNEXEC_KERNEL_CS, $2f +2: +#ifdef CONFIG_PARAVIRT + mov %esi, %eax + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) +#else + mov %esi, %cr0 +#endif +3: +#ifdef CONFIG_PARAVIRT + popl %ecx + popl %eax +#endif + ret +ENDPROC(pax_enter_kernel) + +ENTRY(pax_exit_kernel) +#ifdef CONFIG_PARAVIRT + pushl %eax + pushl %ecx +#endif + mov %cs, %esi + cmp $__KERNEXEC_KERNEL_CS, %esi + jnz 2f +#ifdef CONFIG_PARAVIRT + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); + mov %eax, %esi +#else + mov %cr0, %esi +#endif + btr $16, %esi + ljmp $__KERNEL_CS, $1f +1: +#ifdef CONFIG_PARAVIRT + mov %esi, %eax + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0); +#else + mov %esi, %cr0 +#endif +2: +#ifdef CONFIG_PARAVIRT + popl %ecx + popl %eax +#endif + ret +ENDPROC(pax_exit_kernel) +#endif + +.macro pax_erase_kstack +#ifdef CONFIG_PAX_MEMORY_STACKLEAK + call pax_erase_kstack +#endif +.endm + +#ifdef CONFIG_PAX_MEMORY_STACKLEAK +/* + * ebp: thread_info + * ecx, edx: can be clobbered + */ +ENTRY(pax_erase_kstack) + pushl %edi + pushl %eax + + mov TI_lowest_stack(%ebp), %edi + mov $-0xBEEF, %eax + std + +1: mov %edi, %ecx + and $THREAD_SIZE_asm - 1, %ecx + shr $2, %ecx + repne scasl + jecxz 2f + + cmp $2*16, %ecx + jc 2f + + mov $2*16, %ecx + repe scasl + jecxz 2f + jne 1b + +2: cld + mov %esp, %ecx + sub %edi, %ecx + shr $2, %ecx + rep stosl + + mov TI_task_thread_sp0(%ebp), %edi + sub $128, %edi + mov %edi, TI_lowest_stack(%ebp) + + popl %eax + popl %edi + ret +ENDPROC(pax_erase_kstack) +#endif + +.macro __SAVE_ALL _DS cld PUSH_GS pushl_cfi %fs @@ -214,7 +347,7 @@ CFI_REL_OFFSET ecx, 0 pushl_cfi %ebx CFI_REL_OFFSET ebx, 0 - movl $(__USER_DS), %edx + movl $\_DS, %edx movl %edx, %ds movl %edx, %es movl $(__KERNEL_PERCPU), %edx @@ -222,6 +355,15 @@ SET_KERNEL_GS %edx .endm +.macro SAVE_ALL +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) + __SAVE_ALL __KERNEL_DS + pax_enter_kernel +#else + __SAVE_ALL __USER_DS +#endif +.endm + .macro RESTORE_INT_REGS popl_cfi %ebx CFI_RESTORE ebx @@ -332,7 +474,15 @@ check_userspace: movb PT_CS(%esp), %al andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax cmpl $USER_RPL, %eax + +#ifdef CONFIG_PAX_KERNEXEC + jae resume_userspace + + PAX_EXIT_KERNEL + jmp resume_kernel +#else jb resume_kernel # not returning to v8086 or userspace +#endif ENTRY(resume_userspace) LOCKDEP_SYS_EXIT @@ -344,7 +494,7 @@ ENTRY(resume_userspace) andl $_TIF_WORK_MASK, %ecx # is there any work to be done on # int/exception return? jne work_pending - jmp restore_all + jmp restore_all_pax END(ret_from_exception) #ifdef CONFIG_PREEMPT @@ -394,23 +544,34 @@ sysenter_past_esp: /*CFI_REL_OFFSET cs, 0*/ /* * Push current_thread_info()->sysenter_return to the stack. - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words - * pushed above; +8 corresponds to copy_thread's esp0 setting. */ - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp) + pushl_cfi $0 CFI_REL_OFFSET eip, 0 pushl_cfi %eax SAVE_ALL + GET_THREAD_INFO(%ebp) + movl TI_sysenter_return(%ebp),%ebp + movl %ebp,PT_EIP(%esp) ENABLE_INTERRUPTS(CLBR_NONE) /* * Load the potential sixth argument from user stack. * Careful about security. 
*/ + movl PT_OLDESP(%esp),%ebp + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov PT_OLDSS(%esp),%ds +1: movl %ds:(%ebp),%ebp + push %ss + pop %ds +#else cmpl $__PAGE_OFFSET-3,%ebp jae syscall_fault 1: movl (%ebp),%ebp +#endif + movl %ebp,PT_EBP(%esp) .section __ex_table,"a" .align 4 @@ -433,12 +594,23 @@ sysenter_do_call: testl $_TIF_ALLWORK_MASK, %ecx jne sysexit_audit sysenter_exit: + +#ifdef CONFIG_PAX_RANDKSTACK + pushl_cfi %eax + call pax_randomize_kstack + popl_cfi %eax +#endif + + pax_erase_kstack + /* if something modifies registers it must also disable sysexit */ movl PT_EIP(%esp), %edx movl PT_OLDESP(%esp), %ecx xorl %ebp,%ebp TRACE_IRQS_ON 1: mov PT_FS(%esp), %fs +2: mov PT_DS(%esp), %ds +3: mov PT_ES(%esp), %es PTGS_TO_GS ENABLE_INTERRUPTS_SYSEXIT @@ -455,6 +627,9 @@ sysenter_audit: movl %eax,%edx /* 2nd arg: syscall number */ movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */ call audit_syscall_entry + + pax_erase_kstack + pushl_cfi %ebx movl PT_EAX(%esp),%eax /* reload syscall number */ jmp sysenter_do_call @@ -481,11 +656,17 @@ sysexit_audit: CFI_ENDPROC .pushsection .fixup,"ax" -2: movl $0,PT_FS(%esp) +4: movl $0,PT_FS(%esp) + jmp 1b +5: movl $0,PT_DS(%esp) + jmp 1b +6: movl $0,PT_ES(%esp) jmp 1b .section __ex_table,"a" .align 4 - .long 1b,2b + .long 1b,4b + .long 2b,5b + .long 3b,6b .popsection PTGS_TO_GS_EX ENDPROC(ia32_sysenter_target) @@ -518,6 +699,14 @@ syscall_exit: testl $_TIF_ALLWORK_MASK, %ecx # current->work jne syscall_exit_work +restore_all_pax: + +#ifdef CONFIG_PAX_RANDKSTACK + call pax_randomize_kstack +#endif + + pax_erase_kstack + restore_all: TRACE_IRQS_IRET restore_all_notrace: @@ -577,14 +766,21 @@ ldt_ss: * compensating for the offset by changing to the ESPFIX segment with * a base address that matches for the difference. */ -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx) mov %esp, %edx /* load kernel esp */ mov PT_OLDESP(%esp), %eax /* load userspace esp */ mov %dx, %ax /* eax: new kernel esp */ sub %eax, %edx /* offset (low word is 0) */ +#ifdef CONFIG_SMP + movl PER_CPU_VAR(cpu_number), %ebx + shll $PAGE_SHIFT_asm, %ebx + addl $cpu_gdt_table, %ebx +#else + movl $cpu_gdt_table, %ebx +#endif shr $16, %edx - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */ + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */ pushl_cfi $__ESPFIX_SS pushl_cfi %eax /* new kernel esp */ /* Disable interrupts, but do not irqtrace this section: we @@ -613,29 +809,23 @@ work_resched: movl TI_flags(%ebp), %ecx andl $_TIF_WORK_MASK, %ecx # is there any work to be done other # than syscall tracing? 
- jz restore_all + jz restore_all_pax testb $_TIF_NEED_RESCHED, %cl jnz work_resched work_notifysig: # deal with pending signals and # notify-resume requests + movl %esp, %eax #ifdef CONFIG_VM86 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) - movl %esp, %eax - jne work_notifysig_v86 # returning to kernel-space or + jz 1f # returning to kernel-space or # vm86-space - xorl %edx, %edx - call do_notify_resume - jmp resume_userspace_sig - ALIGN -work_notifysig_v86: pushl_cfi %ecx # save ti_flags for do_notify_resume call save_v86_state # %eax contains pt_regs pointer popl_cfi %ecx movl %eax, %esp -#else - movl %esp, %eax +1: #endif xorl %edx, %edx call do_notify_resume @@ -648,6 +838,9 @@ syscall_trace_entry: movl $-ENOSYS,PT_EAX(%esp) movl %esp, %eax call syscall_trace_enter + + pax_erase_kstack + /* What it returned is what we'll actually use. */ cmpl $(nr_syscalls), %eax jnae syscall_call @@ -670,6 +863,10 @@ END(syscall_exit_work) RING0_INT_FRAME # can't unwind into user space anyway syscall_fault: +#ifdef CONFIG_PAX_MEMORY_UDEREF + push %ss + pop %ds +#endif GET_THREAD_INFO(%ebp) movl $-EFAULT,PT_EAX(%esp) jmp resume_userspace @@ -752,6 +949,36 @@ ptregs_clone: CFI_ENDPROC ENDPROC(ptregs_clone) + ALIGN; +ENTRY(kernel_execve) + CFI_STARTPROC + pushl_cfi %ebp + sub $PT_OLDSS+4,%esp + pushl_cfi %edi + pushl_cfi %ecx + pushl_cfi %eax + lea 3*4(%esp),%edi + mov $PT_OLDSS/4+1,%ecx + xorl %eax,%eax + rep stosl + popl_cfi %eax + popl_cfi %ecx + popl_cfi %edi + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp) + pushl_cfi %esp + call sys_execve + add $4,%esp + CFI_ADJUST_CFA_OFFSET -4 + GET_THREAD_INFO(%ebp) + test %eax,%eax + jz syscall_exit + add $PT_OLDSS+4,%esp + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4 + popl_cfi %ebp + ret + CFI_ENDPROC +ENDPROC(kernel_execve) + .macro FIXUP_ESPFIX_STACK /* * Switch back for ESPFIX stack to the normal zerobased stack @@ -761,8 +988,15 @@ ENDPROC(ptregs_clone) * normal stack and adjusts ESP with the matching offset. 
*/ /* fixup the stack */ - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ +#ifdef CONFIG_SMP + movl PER_CPU_VAR(cpu_number), %ebx + shll $PAGE_SHIFT_asm, %ebx + addl $cpu_gdt_table, %ebx +#else + movl $cpu_gdt_table, %ebx +#endif + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */ + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */ shl $16, %eax addl %esp, %eax /* the adjusted stack pointer */ pushl_cfi $__KERNEL_DS @@ -1213,7 +1447,6 @@ return_to_handler: jmp *%ecx #endif -.section .rodata,"a" #include "syscall_table_32.S" syscall_table_size=(.-sys_call_table) @@ -1259,9 +1492,12 @@ error_code: movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart REG_TO_PTGS %ecx SET_KERNEL_GS %ecx - movl $(__USER_DS), %ecx + movl $(__KERNEL_DS), %ecx movl %ecx, %ds movl %ecx, %es + + pax_enter_kernel + TRACE_IRQS_OFF movl %esp,%eax # pt_regs pointer call *%edi @@ -1346,6 +1582,9 @@ nmi_stack_correct: xorl %edx,%edx # zero error code movl %esp,%eax # pt_regs pointer call do_nmi + + pax_exit_kernel + jmp restore_all_notrace CFI_ENDPROC @@ -1382,6 +1621,9 @@ nmi_espfix_stack: FIXUP_ESPFIX_STACK # %eax == %esp xorl %edx,%edx # zero error code call do_nmi + + pax_exit_kernel + RESTORE_REGS lss 12+4(%esp), %esp # back to espfix stack CFI_ADJUST_CFA_OFFSET -24 diff -urNp linux-2.6.39.1/arch/x86/kernel/entry_64.S linux-2.6.39.1/arch/x86/kernel/entry_64.S --- linux-2.6.39.1/arch/x86/kernel/entry_64.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/entry_64.S 2011-05-23 17:10:49.000000000 -0400 @@ -53,6 +53,7 @@ #include #include #include +#include /* Avoid __ASSEMBLER__'ifying just for this. */ #include @@ -176,6 +177,259 @@ ENTRY(native_usergs_sysret64) ENDPROC(native_usergs_sysret64) #endif /* CONFIG_PARAVIRT */ + .macro ljmpq sel, off +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM) + .byte 0x48; ljmp *1234f(%rip) + .pushsection .rodata + .align 16 + 1234: .quad \off; .word \sel + .popsection +#else + pushq $\sel + pushq $\off + lretq +#endif + .endm + + .macro pax_enter_kernel +#ifdef CONFIG_PAX_KERNEXEC + call pax_enter_kernel +#endif + .endm + + .macro pax_exit_kernel +#ifdef CONFIG_PAX_KERNEXEC + call pax_exit_kernel +#endif + .endm + +#ifdef CONFIG_PAX_KERNEXEC +ENTRY(pax_enter_kernel) + pushq %rdi + +#ifdef CONFIG_PARAVIRT + PV_SAVE_REGS(CLBR_RDI) +#endif + + GET_CR0_INTO_RDI + bts $16,%rdi + jnc 1f + mov %cs,%edi + cmp $__KERNEL_CS,%edi + jz 3f + ljmpq __KERNEL_CS,3f +1: ljmpq __KERNEXEC_KERNEL_CS,2f +2: SET_RDI_INTO_CR0 +3: + +#ifdef CONFIG_PARAVIRT + PV_RESTORE_REGS(CLBR_RDI) +#endif + + popq %rdi + retq +ENDPROC(pax_enter_kernel) + +ENTRY(pax_exit_kernel) + pushq %rdi + +#ifdef CONFIG_PARAVIRT + PV_SAVE_REGS(CLBR_RDI) +#endif + + mov %cs,%rdi + cmp $__KERNEXEC_KERNEL_CS,%edi + jnz 2f + GET_CR0_INTO_RDI + btr $16,%rdi + ljmpq __KERNEL_CS,1f +1: SET_RDI_INTO_CR0 +2: + +#ifdef CONFIG_PARAVIRT + PV_RESTORE_REGS(CLBR_RDI); +#endif + + popq %rdi + retq +ENDPROC(pax_exit_kernel) +#endif + + .macro pax_enter_kernel_user +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_enter_kernel_user +#endif + .endm + + .macro pax_exit_kernel_user +#ifdef CONFIG_PAX_MEMORY_UDEREF + call pax_exit_kernel_user +#endif +#ifdef CONFIG_PAX_RANDKSTACK + push %rax + call pax_randomize_kstack + pop %rax +#endif +#ifdef CONFIG_PAX_MEMORY_STACKLEAK + call pax_erase_kstack +#endif + .endm + +#ifdef CONFIG_PAX_MEMORY_UDEREF +ENTRY(pax_enter_kernel_user) + pushq %rdi + pushq %rbx + +#ifdef CONFIG_PARAVIRT + PV_SAVE_REGS(CLBR_RDI) +#endif + + 
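The pax_enter_kernel_user/pax_exit_kernel_user routines in this entry_64.S hunk implement PAX_MEMORY_UDEREF on amd64 by editing the top-level page table on every kernel entry and exit: the flag byte of each user PGD entry is cleared while the kernel runs, making the userland half of the address space non-present, and is rewritten as 0x67 (PRESENT|RW|USER|ACCESSED|DIRTY) on the way back out. The sketch below models that toggle on a plain array; PTRS_PER_PGD, USER_PGD_PTRS and the sample entry are illustrative stand-ins.

/* Model of the per-syscall UDEREF page-table switch: user PGD slots are
 * disabled on kernel entry and re-enabled on kernel exit. */
#include <stdint.h>
#include <stdio.h>

#define PTRS_PER_PGD	512
#define USER_PGD_PTRS	256		/* slots covering the lower half */

static uint64_t pgd[PTRS_PER_PGD];

static void uderef_enter_kernel(void)
{
	for (int i = 0; i < USER_PGD_PTRS; i++)
		pgd[i] &= ~0xffULL;	/* clear flag byte: entry not present */
}

static void uderef_exit_kernel(void)
{
	for (int i = 0; i < USER_PGD_PTRS; i++)
		pgd[i] |= 0x67;		/* PRESENT|RW|USER|ACCESSED|DIRTY */
}

int main(void)
{
	pgd[0] = 0x1234000ULL | 0x67;	/* a populated user mapping */

	uderef_enter_kernel();
	printf("in kernel:    pgd[0]=%#llx\n", (unsigned long long)pgd[0]);
	uderef_exit_kernel();
	printf("back to user: pgd[0]=%#llx\n", (unsigned long long)pgd[0]);
	return 0;
}
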
GET_CR3_INTO_RDI + mov %rdi,%rbx + add $__START_KERNEL_map,%rbx + sub phys_base(%rip),%rbx + +#ifdef CONFIG_PARAVIRT + pushq %rdi + cmpl $0, pv_info+PARAVIRT_enabled + jz 1f + i = 0 + .rept USER_PGD_PTRS + mov i*8(%rbx),%rsi + mov $0,%sil + lea i*8(%rbx),%rdi + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd) + i = i + 1 + .endr + jmp 2f +1: +#endif + + i = 0 + .rept USER_PGD_PTRS + movb $0,i*8(%rbx) + i = i + 1 + .endr + +#ifdef CONFIG_PARAVIRT +2: popq %rdi +#endif + SET_RDI_INTO_CR3 + +#ifdef CONFIG_PAX_KERNEXEC + GET_CR0_INTO_RDI + bts $16,%rdi + SET_RDI_INTO_CR0 +#endif + +#ifdef CONFIG_PARAVIRT + PV_RESTORE_REGS(CLBR_RDI) +#endif + + popq %rbx + popq %rdi + retq +ENDPROC(pax_enter_kernel_user) + +ENTRY(pax_exit_kernel_user) + push %rdi + +#ifdef CONFIG_PARAVIRT + pushq %rbx + PV_SAVE_REGS(CLBR_RDI) +#endif + +#ifdef CONFIG_PAX_KERNEXEC + GET_CR0_INTO_RDI + btr $16,%rdi + SET_RDI_INTO_CR0 +#endif + + GET_CR3_INTO_RDI + add $__START_KERNEL_map,%rdi + sub phys_base(%rip),%rdi + +#ifdef CONFIG_PARAVIRT + cmpl $0, pv_info+PARAVIRT_enabled + jz 1f + mov %rdi,%rbx + i = 0 + .rept USER_PGD_PTRS + mov i*8(%rbx),%rsi + mov $0x67,%sil + lea i*8(%rbx),%rdi + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd) + i = i + 1 + .endr + jmp 2f +1: +#endif + + i = 0 + .rept USER_PGD_PTRS + movb $0x67,i*8(%rdi) + i = i + 1 + .endr + +#ifdef CONFIG_PARAVIRT +2: PV_RESTORE_REGS(CLBR_RDI) + popq %rbx +#endif + + popq %rdi + retq +ENDPROC(pax_exit_kernel_user) +#endif + + .macro pax_erase_kstack +#ifdef CONFIG_PAX_MEMORY_STACKLEAK + call pax_erase_kstack +#endif + .endm + +#ifdef CONFIG_PAX_MEMORY_STACKLEAK +/* + * r10: thread_info + * rcx, rdx: can be clobbered + */ +ENTRY(pax_erase_kstack) + pushq %rdi + pushq %rax + + GET_THREAD_INFO(%r10) + mov TI_lowest_stack(%r10), %rdi + mov $-0xBEEF, %rax + std + +1: mov %edi, %ecx + and $THREAD_SIZE_asm - 1, %ecx + shr $3, %ecx + repne scasq + jecxz 2f + + cmp $2*8, %ecx + jc 2f + + mov $2*8, %ecx + repe scasq + jecxz 2f + jne 1b + +2: cld + mov %esp, %ecx + sub %edi, %ecx + shr $3, %ecx + rep stosq + + mov TI_task_thread_sp0(%r10), %rdi + sub $256, %rdi + mov %rdi, TI_lowest_stack(%r10) + + popq %rax + popq %rdi + ret +ENDPROC(pax_erase_kstack) +#endif .macro TRACE_IRQS_IRETQ offset=ARGOFFSET #ifdef CONFIG_TRACE_IRQFLAGS @@ -318,7 +572,7 @@ ENTRY(save_args) leaq -RBP+8(%rsp),%rdi /* arg1 for handler */ movq_cfi rbp, 8 /* push %rbp */ leaq 8(%rsp), %rbp /* mov %rsp, %ebp */ - testl $3, CS(%rdi) + testb $3, CS(%rdi) je 1f SWAPGS /* @@ -409,7 +663,7 @@ ENTRY(ret_from_fork) RESTORE_REST - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread? 
je int_ret_from_sys_call testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET @@ -455,7 +709,7 @@ END(ret_from_fork) ENTRY(system_call) CFI_STARTPROC simple CFI_SIGNAL_FRAME - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET + CFI_DEF_CFA rsp,0 CFI_REGISTER rip,rcx /*CFI_REGISTER rflags,r11*/ SWAPGS_UNSAFE_STACK @@ -468,12 +722,13 @@ ENTRY(system_call_after_swapgs) movq %rsp,PER_CPU_VAR(old_rsp) movq PER_CPU_VAR(kernel_stack),%rsp + pax_enter_kernel_user /* * No need to follow this irqs off/on section - it's straight * and short: */ ENABLE_INTERRUPTS(CLBR_NONE) - SAVE_ARGS 8,1 + SAVE_ARGS 8*6,1 movq %rax,ORIG_RAX-ARGOFFSET(%rsp) movq %rcx,RIP-ARGOFFSET(%rsp) CFI_REL_OFFSET rip,RIP-ARGOFFSET @@ -502,6 +757,7 @@ sysret_check: andl %edi,%edx jnz sysret_careful CFI_REMEMBER_STATE + pax_exit_kernel_user /* * sysretq will re-enable interrupts: */ @@ -560,6 +816,9 @@ auditsys: movq %rax,%rsi /* 2nd arg: syscall number */ movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */ call audit_syscall_entry + + pax_erase_kstack + LOAD_ARGS 0 /* reload call-clobbered registers */ jmp system_call_fastpath @@ -590,6 +849,9 @@ tracesys: FIXUP_TOP_OF_STACK %rdi movq %rsp,%rdi call syscall_trace_enter + + pax_erase_kstack + /* * Reload arg registers from stack in case ptrace changed them. * We don't reload %rax because syscall_trace_enter() returned @@ -611,7 +873,7 @@ tracesys: GLOBAL(int_ret_from_sys_call) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF - testl $3,CS-ARGOFFSET(%rsp) + testb $3,CS-ARGOFFSET(%rsp) je retint_restore_args movl $_TIF_ALLWORK_MASK,%edi /* edi: mask to check */ @@ -793,6 +1055,16 @@ END(interrupt) CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP call save_args PARTIAL_FRAME 0 +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rdi) + jnz 1f + pax_enter_kernel + jmp 2f +1: pax_enter_kernel_user +2: +#else + pax_enter_kernel +#endif call \func .endm @@ -825,7 +1097,7 @@ ret_from_intr: CFI_ADJUST_CFA_OFFSET -8 exit_intr: GET_THREAD_INFO(%rcx) - testl $3,CS-ARGOFFSET(%rsp) + testb $3,CS-ARGOFFSET(%rsp) je retint_kernel /* Interrupt came from user space */ @@ -847,12 +1119,14 @@ retint_swapgs: /* return to user-space * The iretq could re-enable interrupts: */ DISABLE_INTERRUPTS(CLBR_ANY) + pax_exit_kernel_user TRACE_IRQS_IRETQ SWAPGS jmp restore_args retint_restore_args: /* return to kernel space */ DISABLE_INTERRUPTS(CLBR_ANY) + pax_exit_kernel /* * The iretq could re-enable interrupts: */ @@ -1027,6 +1301,16 @@ ENTRY(\sym) CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 call error_entry DEFAULT_FRAME 0 +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rsp) + jnz 1f + pax_enter_kernel + jmp 2f +1: pax_enter_kernel_user +2: +#else + pax_enter_kernel +#endif movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ call \do_sym @@ -1044,6 +1328,16 @@ ENTRY(\sym) CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 call save_paranoid TRACE_IRQS_OFF +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rsp) + jnz 1f + pax_enter_kernel + jmp 2f +1: pax_enter_kernel_user +2: +#else + pax_enter_kernel +#endif movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ call \do_sym @@ -1052,7 +1346,7 @@ ENTRY(\sym) END(\sym) .endm -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12) .macro paranoidzeroentry_ist sym do_sym ist ENTRY(\sym) INTR_FRAME @@ -1062,8 +1356,24 @@ ENTRY(\sym) CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 call save_paranoid TRACE_IRQS_OFF +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rsp) + jnz 1f + pax_enter_kernel + jmp 2f +1: 
pax_enter_kernel_user +2: +#else + pax_enter_kernel +#endif movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ +#ifdef CONFIG_SMP + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d + lea init_tss(%r12), %r12 +#else + lea init_tss(%rip), %r12 +#endif subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) call \do_sym addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) @@ -1080,6 +1390,16 @@ ENTRY(\sym) CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 call error_entry DEFAULT_FRAME 0 +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rsp) + jnz 1f + pax_enter_kernel + jmp 2f +1: pax_enter_kernel_user +2: +#else + pax_enter_kernel +#endif movq %rsp,%rdi /* pt_regs pointer */ movq ORIG_RAX(%rsp),%rsi /* get error code */ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ @@ -1099,6 +1419,16 @@ ENTRY(\sym) call save_paranoid DEFAULT_FRAME 0 TRACE_IRQS_OFF +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rsp) + jnz 1f + pax_enter_kernel + jmp 2f +1: pax_enter_kernel_user +2: +#else + pax_enter_kernel +#endif movq %rsp,%rdi /* pt_regs pointer */ movq ORIG_RAX(%rsp),%rsi /* get error code */ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ @@ -1361,14 +1691,27 @@ ENTRY(paranoid_exit) TRACE_IRQS_OFF testl %ebx,%ebx /* swapgs needed? */ jnz paranoid_restore - testl $3,CS(%rsp) + testb $3,CS(%rsp) jnz paranoid_userspace +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_exit_kernel + TRACE_IRQS_IRETQ 0 + SWAPGS_UNSAFE_STACK + RESTORE_ALL 8 + jmp irq_return +#endif paranoid_swapgs: +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_exit_kernel_user +#else + pax_exit_kernel +#endif TRACE_IRQS_IRETQ 0 SWAPGS_UNSAFE_STACK RESTORE_ALL 8 jmp irq_return paranoid_restore: + pax_exit_kernel TRACE_IRQS_IRETQ 0 RESTORE_ALL 8 jmp irq_return @@ -1426,7 +1769,7 @@ ENTRY(error_entry) movq_cfi r14, R14+8 movq_cfi r15, R15+8 xorl %ebx,%ebx - testl $3,CS+8(%rsp) + testb $3,CS+8(%rsp) je error_kernelspace error_swapgs: SWAPGS @@ -1490,6 +1833,16 @@ ENTRY(nmi) CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 call save_paranoid DEFAULT_FRAME 0 +#ifdef CONFIG_PAX_MEMORY_UDEREF + testb $3, CS(%rsp) + jnz 1f + pax_enter_kernel + jmp 2f +1: pax_enter_kernel_user +2: +#else + pax_enter_kernel +#endif /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ movq %rsp,%rdi movq $-1,%rsi @@ -1500,11 +1853,25 @@ ENTRY(nmi) DISABLE_INTERRUPTS(CLBR_NONE) testl %ebx,%ebx /* swapgs needed? 
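The same guard recurs in each of these entry stubs: the CS selector saved on the stack is tested and the stub enters either pax_enter_kernel or pax_enter_kernel_user. Only the two low bits of CS (the requested privilege level) matter, which is also why the patch narrows the earlier testl $3 instructions to testb $3. A minimal C sketch of the dispatch, with an invented helper name:

    /* Illustration only: the decision the repeated
     *   testb $3, CS(%rsp) ... pax_enter_kernel / pax_enter_kernel_user
     * sequences make, written out in C. */
    #include <stdbool.h>

    #define RPL_MASK_EXAMPLE 0x3    /* low bits of a segment selector */

    static bool came_from_user_mode(unsigned short saved_cs)
    {
        return (saved_cs & RPL_MASK_EXAMPLE) != 0;
    }

    /* callers would then do:
     *   if (came_from_user_mode(cs)) pax_enter_kernel_user(); else pax_enter_kernel();
     */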
*/ jnz nmi_restore - testl $3,CS(%rsp) + testb $3,CS(%rsp) jnz nmi_userspace +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_exit_kernel + SWAPGS_UNSAFE_STACK + RESTORE_ALL 8 + jmp irq_return +#endif nmi_swapgs: +#ifdef CONFIG_PAX_MEMORY_UDEREF + pax_exit_kernel_user +#else + pax_exit_kernel +#endif SWAPGS_UNSAFE_STACK + RESTORE_ALL 8 + jmp irq_return nmi_restore: + pax_exit_kernel RESTORE_ALL 8 jmp irq_return nmi_userspace: diff -urNp linux-2.6.39.1/arch/x86/kernel/ftrace.c linux-2.6.39.1/arch/x86/kernel/ftrace.c --- linux-2.6.39.1/arch/x86/kernel/ftrace.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/ftrace.c 2011-05-22 19:36:30.000000000 -0400 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the static void *mod_code_newcode; /* holds the text to write to the IP */ static unsigned nmi_wait_count; -static atomic_t nmi_update_count = ATOMIC_INIT(0); +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0); int ftrace_arch_read_dyn_info(char *buf, int size) { @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, r = snprintf(buf, size, "%u %u", nmi_wait_count, - atomic_read(&nmi_update_count)); + atomic_read_unchecked(&nmi_update_count)); return r; } @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void) if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { smp_rmb(); + pax_open_kernel(); ftrace_mod_code(); - atomic_inc(&nmi_update_count); + pax_close_kernel(); + atomic_inc_unchecked(&nmi_update_count); } /* Must have previous changes seen before executions */ smp_mb(); @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns { unsigned char replaced[MCOUNT_INSN_SIZE]; + ip = ktla_ktva(ip); + /* * Note: Due to modules and __init, code can * disappear and change, we need to protect against faulting @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun unsigned char old[MCOUNT_INSN_SIZE], *new; int ret; - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE); new = ftrace_call_replace(ip, (unsigned long)func); ret = ftrace_modify_code(ip, old, new); @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long { unsigned char code[MCOUNT_INSN_SIZE]; + ip = ktla_ktva(ip); + if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) return -EFAULT; diff -urNp linux-2.6.39.1/arch/x86/kernel/head32.c linux-2.6.39.1/arch/x86/kernel/head32.c --- linux-2.6.39.1/arch/x86/kernel/head32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/head32.c 2011-05-22 19:36:30.000000000 -0400 @@ -19,6 +19,7 @@ #include #include #include +#include static void __init i386_default_early_setup(void) { @@ -34,7 +35,7 @@ void __init i386_start_kernel(void) { memblock_init(); - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD /* Reserve INITRD */ diff -urNp linux-2.6.39.1/arch/x86/kernel/head_32.S linux-2.6.39.1/arch/x86/kernel/head_32.S --- linux-2.6.39.1/arch/x86/kernel/head_32.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/head_32.S 2011-05-22 19:36:30.000000000 -0400 @@ -25,6 +25,12 @@ /* Physical address */ #define pa(X) ((X) - __PAGE_OFFSET) +#ifdef CONFIG_PAX_KERNEXEC +#define ta(X) (X) +#else +#define ta(X) ((X) - __PAGE_OFFSET) +#endif + /* * References to members of the new_cpu_data structure. 
*/ @@ -54,11 +60,7 @@ * and small than max_low_pfn, otherwise will waste some page table entries */ -#if PTRS_PER_PMD > 1 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD) -#else -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) -#endif +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE) /* Number of possible pages in the lowmem region */ LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P RESERVE_BRK(pagetables, INIT_MAP_SIZE) /* + * Real beginning of normal "text" segment + */ +ENTRY(stext) +ENTRY(_stext) + +/* * 32-bit kernel entrypoint; only used by the boot CPU. On entry, * %esi points to the real-mode code as a 32-bit pointer. * CS and DS must be 4 GB flat segments, but we don't depend on @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE) * can. */ __HEAD + +#ifdef CONFIG_PAX_KERNEXEC + jmp startup_32 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */ +.fill PAGE_SIZE-5,1,0xcc +#endif + ENTRY(startup_32) movl pa(stack_start),%ecx @@ -105,6 +120,57 @@ ENTRY(startup_32) 2: leal -__PAGE_OFFSET(%ecx),%esp +#ifdef CONFIG_SMP + movl $pa(cpu_gdt_table),%edi + movl $__per_cpu_load,%eax + movw %ax,__KERNEL_PERCPU + 2(%edi) + rorl $16,%eax + movb %al,__KERNEL_PERCPU + 4(%edi) + movb %ah,__KERNEL_PERCPU + 7(%edi) + movl $__per_cpu_end - 1,%eax + subl $__per_cpu_start,%eax + movw %ax,__KERNEL_PERCPU + 0(%edi) +#endif + +#ifdef CONFIG_PAX_MEMORY_UDEREF + movl $NR_CPUS,%ecx + movl $pa(cpu_gdt_table),%edi +1: + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi) + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi) + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi) + addl $PAGE_SIZE_asm,%edi + loop 1b +#endif + +#ifdef CONFIG_PAX_KERNEXEC + movl $pa(boot_gdt),%edi + movl $__LOAD_PHYSICAL_ADDR,%eax + movw %ax,__BOOT_CS + 2(%edi) + rorl $16,%eax + movb %al,__BOOT_CS + 4(%edi) + movb %ah,__BOOT_CS + 7(%edi) + rorl $16,%eax + + ljmp $(__BOOT_CS),$1f +1: + + movl $NR_CPUS,%ecx + movl $pa(cpu_gdt_table),%edi + addl $__PAGE_OFFSET,%eax +1: + movw %ax,__KERNEL_CS + 2(%edi) + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi) + rorl $16,%eax + movb %al,__KERNEL_CS + 4(%edi) + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi) + movb %ah,__KERNEL_CS + 7(%edi) + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi) + rorl $16,%eax + addl $PAGE_SIZE_asm,%edi + loop 1b +#endif + /* * Clear BSS first so that there are no surprises... 
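The per-CPU and KERNEXEC GDT fix-ups above write a 32-bit base address into a descriptor one piece at a time (movw at offset 2, movb at offsets 4 and 7) because an x86 segment descriptor scatters the base across three fields. A small C model of that layout, for orientation only (field names are invented):

    #include <stdint.h>

    struct gdt_desc_sketch {
        uint16_t limit0;        /* limit bits  0..15              */
        uint16_t base0;         /* base  bits  0..15  -> offset 2 */
        uint8_t  base1;         /* base  bits 16..23  -> offset 4 */
        uint8_t  access;        /* type / DPL / present           */
        uint8_t  limit1_flags;  /* limit bits 16..19 + G/D flags  */
        uint8_t  base2;         /* base  bits 24..31  -> offset 7 */
    };

    static void set_desc_base_sketch(struct gdt_desc_sketch *d, uint32_t base)
    {
        d->base0 = base & 0xffff;
        d->base1 = (base >> 16) & 0xff;
        d->base2 = (base >> 24) & 0xff;
    }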
*/ @@ -195,8 +261,11 @@ ENTRY(startup_32) movl %eax, pa(max_pfn_mapped) /* Do early initialization of the fixmap area */ - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8) +#ifdef CONFIG_COMPAT_VDSO + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8) +#else + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8) +#endif #else /* Not PAE */ page_pde_offset = (__PAGE_OFFSET >> 20); @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20); movl %eax, pa(max_pfn_mapped) /* Do early initialization of the fixmap area */ - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax - movl %eax,pa(initial_page_table+0xffc) +#ifdef CONFIG_COMPAT_VDSO + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc) +#else + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc) +#endif #endif #ifdef CONFIG_PARAVIRT @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20); cmpl $num_subarch_entries, %eax jae bad_subarch - movl pa(subarch_entries)(,%eax,4), %eax - subl $__PAGE_OFFSET, %eax - jmp *%eax + jmp *pa(subarch_entries)(,%eax,4) bad_subarch: WEAK(lguest_entry) @@ -255,10 +325,10 @@ WEAK(xen_entry) __INITDATA subarch_entries: - .long default_entry /* normal x86/PC */ - .long lguest_entry /* lguest hypervisor */ - .long xen_entry /* Xen hypervisor */ - .long default_entry /* Moorestown MID */ + .long ta(default_entry) /* normal x86/PC */ + .long ta(lguest_entry) /* lguest hypervisor */ + .long ta(xen_entry) /* Xen hypervisor */ + .long ta(default_entry) /* Moorestown MID */ num_subarch_entries = (. - subarch_entries) / 4 .previous #else @@ -312,6 +382,7 @@ default_entry: orl %edx,%eax movl %eax,%cr4 +#ifdef CONFIG_X86_PAE testb $X86_CR4_PAE, %al # check if PAE is enabled jz 6f @@ -340,6 +411,9 @@ default_entry: /* Make changes effective */ wrmsr + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4) +#endif + 6: /* @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP 1: movl $(__KERNEL_DS),%eax # reload all the segment registers movl %eax,%ss # after changing gdt. 
- movl $(__USER_DS),%eax # DS/ES contains default USER segment +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment movl %eax,%ds movl %eax,%es @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP */ cmpb $0,ready jne 1f - movl $gdt_page,%eax + movl $cpu_gdt_table,%eax movl $stack_canary,%ecx +#ifdef CONFIG_SMP + addl $__per_cpu_load,%ecx +#endif movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) shrl $16, %ecx movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax) 1: -#endif movl $(__KERNEL_STACK_CANARY),%eax +#elif defined(CONFIG_PAX_MEMORY_UDEREF) + movl $(__USER_DS),%eax +#else + xorl %eax,%eax +#endif movl %eax,%gs xorl %eax,%eax # Clear LDT @@ -558,22 +639,22 @@ early_page_fault: jmp early_fault early_fault: - cld #ifdef CONFIG_PRINTK + cmpl $1,%ss:early_recursion_flag + je hlt_loop + incl %ss:early_recursion_flag + cld pusha movl $(__KERNEL_DS),%eax movl %eax,%ds movl %eax,%es - cmpl $2,early_recursion_flag - je hlt_loop - incl early_recursion_flag movl %cr2,%eax pushl %eax pushl %edx /* trapno */ pushl $fault_msg call printk +; call dump_stack #endif - call dump_stack hlt_loop: hlt jmp hlt_loop @@ -581,8 +662,11 @@ hlt_loop: /* This is the default interrupt "handler" :-) */ ALIGN ignore_int: - cld #ifdef CONFIG_PRINTK + cmpl $2,%ss:early_recursion_flag + je hlt_loop + incl %ss:early_recursion_flag + cld pushl %eax pushl %ecx pushl %edx @@ -591,9 +675,6 @@ ignore_int: movl $(__KERNEL_DS),%eax movl %eax,%ds movl %eax,%es - cmpl $2,early_recursion_flag - je hlt_loop - incl early_recursion_flag pushl 16(%esp) pushl 24(%esp) pushl 32(%esp) @@ -622,29 +703,43 @@ ENTRY(initial_code) /* * BSS section */ -__PAGE_ALIGNED_BSS - .align PAGE_SIZE #ifdef CONFIG_X86_PAE +.section .initial_pg_pmd,"a",@progbits initial_pg_pmd: .fill 1024*KPMDS,4,0 #else +.section .initial_page_table,"a",@progbits ENTRY(initial_page_table) .fill 1024,4,0 #endif +.section .initial_pg_fixmap,"a",@progbits initial_pg_fixmap: .fill 1024,4,0 +.section .empty_zero_page,"a",@progbits ENTRY(empty_zero_page) .fill 4096,1,0 +.section .swapper_pg_dir,"a",@progbits ENTRY(swapper_pg_dir) +#ifdef CONFIG_X86_PAE + .fill 4,8,0 +#else .fill 1024,4,0 +#endif + +/* + * The IDT has to be page-aligned to simplify the Pentium + * F0 0F bug workaround.. We have a special link segment + * for this. + */ +.section .idt,"a",@progbits +ENTRY(idt_table) + .fill 256,8,0 /* * This starts the data section. */ #ifdef CONFIG_X86_PAE -__PAGE_ALIGNED_DATA - /* Page-aligned for the benefit of paravirt? 
*/ - .align PAGE_SIZE +.section .initial_page_table,"a",@progbits ENTRY(initial_page_table) .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ # if KPMDS == 3 @@ -663,18 +758,27 @@ ENTRY(initial_page_table) # error "Kernel PMDs should be 1, 2 or 3" # endif .align PAGE_SIZE /* needs to be page-sized too */ + +#ifdef CONFIG_PAX_PER_CPU_PGD +ENTRY(cpu_pgd) + .rept NR_CPUS + .fill 4,8,0 + .endr +#endif + #endif .data .balign 4 ENTRY(stack_start) - .long init_thread_union+THREAD_SIZE + .long init_thread_union+THREAD_SIZE-8 + +ready: .byte 0 +.section .rodata,"a",@progbits early_recursion_flag: .long 0 -ready: .byte 0 - int_msg: .asciz "Unknown interrupt or fault at: %p %p %p\n" @@ -707,7 +811,7 @@ fault_msg: .word 0 # 32 bit align gdt_desc.address boot_gdt_descr: .word __BOOT_DS+7 - .long boot_gdt - __PAGE_OFFSET + .long pa(boot_gdt) .word 0 # 32-bit align idt_desc.address idt_descr: @@ -718,7 +822,7 @@ idt_descr: .word 0 # 32 bit align gdt_desc.address ENTRY(early_gdt_descr) .word GDT_ENTRIES*8-1 - .long gdt_page /* Overwritten for secondary CPUs */ + .long cpu_gdt_table /* Overwritten for secondary CPUs */ /* * The boot_gdt must mirror the equivalent in setup.S and is @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr) .align L1_CACHE_BYTES ENTRY(boot_gdt) .fill GDT_ENTRY_BOOT_CS,8,0 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */ - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */ + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */ + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */ + + .align PAGE_SIZE_asm +ENTRY(cpu_gdt_table) + .rept NR_CPUS + .quad 0x0000000000000000 /* NULL descriptor */ + .quad 0x0000000000000000 /* 0x0b reserved */ + .quad 0x0000000000000000 /* 0x13 reserved */ + .quad 0x0000000000000000 /* 0x1b reserved */ + +#ifdef CONFIG_PAX_KERNEXEC + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */ +#else + .quad 0x0000000000000000 /* 0x20 unused */ +#endif + + .quad 0x0000000000000000 /* 0x28 unused */ + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */ + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */ + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */ + .quad 0x0000000000000000 /* 0x4b reserved */ + .quad 0x0000000000000000 /* 0x53 reserved */ + .quad 0x0000000000000000 /* 0x5b reserved */ + + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */ + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */ + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */ + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */ + + .quad 0x0000000000000000 /* 0x80 TSS descriptor */ + .quad 0x0000000000000000 /* 0x88 LDT descriptor */ + + /* + * Segments used for calling PnP BIOS have byte granularity. + * The code segments and data segments have fixed 64k limits, + * the transfer segment sizes are set at run time. + */ + .quad 0x00409b000000ffff /* 0x90 32-bit code */ + .quad 0x00009b000000ffff /* 0x98 16-bit code */ + .quad 0x000093000000ffff /* 0xa0 16-bit data */ + .quad 0x0000930000000000 /* 0xa8 16-bit data */ + .quad 0x0000930000000000 /* 0xb0 16-bit data */ + + /* + * The APM segments have byte granularity and their bases + * are set at run time. All have 64k limits. 
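For orientation, the descriptor constants introduced in cpu_gdt_table above decode mechanically; the change from 0x00cf9a... to 0x00cf9b... pre-sets the descriptor's accessed bit, presumably so the CPU never has to write the descriptor when the selector is loaded once the GDT lives in read-only memory. A small, self-contained decode example (illustration only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t d = 0x00cf9b000000ffffULL;                                   /* kernel 4GB code */
        uint32_t limit  = (d & 0xffff) | ((d >> 32) & 0xf0000);               /* 0xfffff (4GB with G=1) */
        uint32_t base   = ((d >> 16) & 0xffffff) | ((d >> 32) & 0xff000000);  /* 0 */
        uint8_t  access = (d >> 40) & 0xff;                                   /* 0x9b: present, ring 0, code, accessed */
        printf("base=%#x limit=%#x access=%#x\n", base, limit, access);
        return 0;
    }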
+ */ + .quad 0x00409b000000ffff /* 0xb8 APM CS code */ + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */ + .quad 0x004093000000ffff /* 0xc8 APM DS data */ + + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */ + .quad 0x0040930000000000 /* 0xd8 - PERCPU */ + .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */ + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */ + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */ + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ + + /* Be sure this is zeroed to avoid false validations in Xen */ + .fill PAGE_SIZE_asm - GDT_SIZE,1,0 + .endr diff -urNp linux-2.6.39.1/arch/x86/kernel/head_64.S linux-2.6.39.1/arch/x86/kernel/head_64.S --- linux-2.6.39.1/arch/x86/kernel/head_64.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/head_64.S 2011-05-22 19:36:30.000000000 -0400 @@ -19,6 +19,7 @@ #include #include #include +#include #ifdef CONFIG_PARAVIRT #include @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET) L4_START_KERNEL = pgd_index(__START_KERNEL_map) L3_START_KERNEL = pud_index(__START_KERNEL_map) +L4_VMALLOC_START = pgd_index(VMALLOC_START) +L3_VMALLOC_START = pud_index(VMALLOC_START) +L4_VMEMMAP_START = pgd_index(VMEMMAP_START) +L3_VMEMMAP_START = pud_index(VMEMMAP_START) .text __HEAD @@ -85,35 +90,22 @@ startup_64: */ addq %rbp, init_level4_pgt + 0(%rip) addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip) + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip) + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip) addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip) addq %rbp, level3_ident_pgt + 0(%rip) +#ifndef CONFIG_XEN + addq %rbp, level3_ident_pgt + 8(%rip) +#endif - addq %rbp, level3_kernel_pgt + (510*8)(%rip) - addq %rbp, level3_kernel_pgt + (511*8)(%rip) + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip) - addq %rbp, level2_fixmap_pgt + (506*8)(%rip) + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip) + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip) - /* Add an Identity mapping if I am above 1G */ - leaq _text(%rip), %rdi - andq $PMD_PAGE_MASK, %rdi - - movq %rdi, %rax - shrq $PUD_SHIFT, %rax - andq $(PTRS_PER_PUD - 1), %rax - jz ident_complete - - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx - leaq level3_ident_pgt(%rip), %rbx - movq %rdx, 0(%rbx, %rax, 8) - - movq %rdi, %rax - shrq $PMD_SHIFT, %rax - andq $(PTRS_PER_PMD - 1), %rax - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx - leaq level2_spare_pgt(%rip), %rbx - movq %rdx, 0(%rbx, %rax, 8) -ident_complete: + addq %rbp, level2_fixmap_pgt + (506*8)(%rip) + addq %rbp, level2_fixmap_pgt + (507*8)(%rip) /* * Fixup the kernel text+data virtual addresses. Note that @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64) * after the boot processor executes this code. */ - /* Enable PAE mode and PGE */ - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax + /* Enable PAE mode and PSE/PGE */ + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax movq %rax, %cr4 /* Setup early boot stage 4 level pagetables. */ @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64) movl $MSR_EFER, %ecx rdmsr btsl $_EFER_SCE, %eax /* Enable System Call */ - btl $20,%edi /* No Execute supported? */ + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? 
*/ jnc 1f btsl $_EFER_NX, %eax + leaq init_level4_pgt(%rip), %rdi + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi) + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi) + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi) + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip) 1: wrmsr /* Make changes effective */ /* Setup cr0 */ @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64) bad_address: jmp bad_address - .section ".init.text","ax" + __INIT #ifdef CONFIG_EARLY_PRINTK .globl early_idt_handlers early_idt_handlers: @@ -314,18 +311,23 @@ ENTRY(early_idt_handler) #endif /* EARLY_PRINTK */ 1: hlt jmp 1b + .previous #ifdef CONFIG_EARLY_PRINTK + __INITDATA early_recursion_flag: .long 0 + .previous + .section .rodata,"a",@progbits early_idt_msg: .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n" early_idt_ripmsg: .asciz "RIP %s\n" -#endif /* CONFIG_EARLY_PRINTK */ .previous +#endif /* CONFIG_EARLY_PRINTK */ + .section .rodata,"a",@progbits #define NEXT_PAGE(name) \ .balign PAGE_SIZE; \ ENTRY(name) @@ -338,7 +340,6 @@ ENTRY(name) i = i + 1 ; \ .endr - .data /* * This default setting generates an ident mapping at address 0x100000 * and a mapping for the kernel that precisely maps virtual address @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE .org init_level4_pgt + L4_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE + .org init_level4_pgt + L4_VMALLOC_START*8, 0 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE + .org init_level4_pgt + L4_VMEMMAP_START*8, 0 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE .org init_level4_pgt + L4_START_KERNEL*8, 0 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE +#ifdef CONFIG_PAX_PER_CPU_PGD +NEXT_PAGE(cpu_pgd) + .rept NR_CPUS + .fill 512,8,0 + .endr +#endif + NEXT_PAGE(level3_ident_pgt) .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE +#ifdef CONFIG_XEN .fill 511,8,0 +#else + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE + .fill 510,8,0 +#endif + +NEXT_PAGE(level3_vmalloc_pgt) + .fill 512,8,0 + +NEXT_PAGE(level3_vmemmap_pgt) + .fill L3_VMEMMAP_START,8,0 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE NEXT_PAGE(level3_kernel_pgt) .fill L3_START_KERNEL,8,0 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt) .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE +NEXT_PAGE(level2_vmemmap_pgt) + .fill 512,8,0 + NEXT_PAGE(level2_fixmap_pgt) - .fill 506,8,0 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ - .fill 5,8,0 + .fill 507,8,0 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */ + .fill 4,8,0 -NEXT_PAGE(level1_fixmap_pgt) +NEXT_PAGE(level1_vsyscall_pgt) .fill 512,8,0 -NEXT_PAGE(level2_ident_pgt) - /* Since I easily can, map the first 1G. + /* Since I easily can, map the first 2G. * Don't set NX because code runs from these pages. */ - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) +NEXT_PAGE(level2_ident_pgt) + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD) NEXT_PAGE(level2_kernel_pgt) /* @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt) * If you want to increase this then increase MODULES_VADDR * too.) 
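In the head_64.S hunk above, once EFER.NX is confirmed available the patch also sets the NX bit directly on the top-level entries covering the direct mapping, vmalloc and vmemmap regions, and records it in __supported_pte_mask. Roughly the following in C (illustration only; _PAGE_NX_SKETCH stands in for the real _PAGE_NX, bit 63):

    #include <stdint.h>

    #define _PAGE_NX_SKETCH (1ULL << 63)

    static void mark_early_pgds_nx(uint64_t *pgd, unsigned idx_page_offset,
                                   unsigned idx_vmalloc, unsigned idx_vmemmap)
    {
        pgd[idx_page_offset] |= _PAGE_NX_SKETCH;
        pgd[idx_vmalloc]     |= _PAGE_NX_SKETCH;
        pgd[idx_vmemmap]     |= _PAGE_NX_SKETCH;
    }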
*/ - PMDS(0, __PAGE_KERNEL_LARGE_EXEC, - KERNEL_IMAGE_SIZE/PMD_SIZE) - -NEXT_PAGE(level2_spare_pgt) - .fill 512, 8, 0 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE) #undef PMDS #undef NEXT_PAGE - .data + .align PAGE_SIZE +ENTRY(cpu_gdt_table) + .rept NR_CPUS + .quad 0x0000000000000000 /* NULL descriptor */ + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */ + .quad 0x00af9b000000ffff /* __KERNEL_CS */ + .quad 0x00cf93000000ffff /* __KERNEL_DS */ + .quad 0x00cffb000000ffff /* __USER32_CS */ + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */ + .quad 0x00affb000000ffff /* __USER_CS */ + +#ifdef CONFIG_PAX_KERNEXEC + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */ +#else + .quad 0x0 /* unused */ +#endif + + .quad 0,0 /* TSS */ + .quad 0,0 /* LDT */ + .quad 0,0,0 /* three TLS descriptors */ + .quad 0x0000f40000000000 /* node/CPU stored in limit */ + /* asm/segment.h:GDT_ENTRIES must match this */ + + /* zero the remaining page */ + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0 + .endr + .align 16 .globl early_gdt_descr early_gdt_descr: .word GDT_ENTRIES*8-1 early_gdt_descr_base: - .quad INIT_PER_CPU_VAR(gdt_page) + .quad cpu_gdt_table ENTRY(phys_base) /* This must match the first entry in level2_kernel_pgt */ .quad 0x0000000000000000 #include "../../x86/xen/xen-head.S" - - .section .bss, "aw", @nobits + + .section .rodata,"a",@progbits .align L1_CACHE_BYTES ENTRY(idt_table) - .skip IDT_ENTRIES * 16 + .fill 512,8,0 __PAGE_ALIGNED_BSS .align PAGE_SIZE diff -urNp linux-2.6.39.1/arch/x86/kernel/i386_ksyms_32.c linux-2.6.39.1/arch/x86/kernel/i386_ksyms_32.c --- linux-2.6.39.1/arch/x86/kernel/i386_ksyms_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/i386_ksyms_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void); EXPORT_SYMBOL(cmpxchg8b_emu); #endif +EXPORT_SYMBOL_GPL(cpu_gdt_table); + /* Networking helper routines. */ EXPORT_SYMBOL(csum_partial_copy_generic); +EXPORT_SYMBOL(csum_partial_copy_generic_to_user); +EXPORT_SYMBOL(csum_partial_copy_generic_from_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr); EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(empty_zero_page); + +#ifdef CONFIG_PAX_KERNEXEC +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR); +#endif diff -urNp linux-2.6.39.1/arch/x86/kernel/i8259.c linux-2.6.39.1/arch/x86/kernel/i8259.c --- linux-2.6.39.1/arch/x86/kernel/i8259.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/i8259.c 2011-05-22 19:36:30.000000000 -0400 @@ -210,7 +210,7 @@ spurious_8259A_irq: "spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); /* * Theoretically we do not have to handle this IRQ, * but in Linux this does not cause problems and is diff -urNp linux-2.6.39.1/arch/x86/kernel/init_task.c linux-2.6.39.1/arch/x86/kernel/init_task.c --- linux-2.6.39.1/arch/x86/kernel/init_task.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/init_task.c 2011-05-22 19:36:30.000000000 -0400 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan * way process stacks are handled. This is done by having a special * "init_task" linker map entry.. */ -union thread_union init_thread_union __init_task_data = - { INIT_THREAD_INFO(init_task) }; +union thread_union init_thread_union __init_task_data; /* * Initial task structure. @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task); * section. 
Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong. */ -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; - +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS }; +EXPORT_SYMBOL(init_tss); diff -urNp linux-2.6.39.1/arch/x86/kernel/ioport.c linux-2.6.39.1/arch/x86/kernel/ioport.c --- linux-2.6.39.1/arch/x86/kernel/ioport.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/ioport.c 2011-05-22 19:41:32.000000000 -0400 @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) return -EINVAL; +#ifdef CONFIG_GRKERNSEC_IO + if (turn_on && grsec_disable_privio) { + gr_handle_ioperm(); + return -EPERM; + } +#endif if (turn_on && !capable(CAP_SYS_RAWIO)) return -EPERM; @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long * because the ->io_bitmap_max value must match the bitmap * contents: */ - tss = &per_cpu(init_tss, get_cpu()); + tss = init_tss + get_cpu(); if (turn_on) bitmap_clear(t->io_bitmap_ptr, from, num); @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct return -EINVAL; /* Trying to gain more privileges? */ if (level > old) { +#ifdef CONFIG_GRKERNSEC_IO + if (grsec_disable_privio) { + gr_handle_iopl(); + return -EPERM; + } +#endif if (!capable(CAP_SYS_RAWIO)) return -EPERM; } diff -urNp linux-2.6.39.1/arch/x86/kernel/irq_32.c linux-2.6.39.1/arch/x86/kernel/irq_32.c --- linux-2.6.39.1/arch/x86/kernel/irq_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/irq_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -36,7 +36,7 @@ static int check_stack_overflow(void) __asm__ __volatile__("andl %%esp,%0" : "=r" (sp) : "0" (THREAD_SIZE - 1)); - return sp < (sizeof(struct thread_info) + STACK_WARN); + return sp < STACK_WARN; } static void print_stack_overflow(void) @@ -54,8 +54,8 @@ static inline void print_stack_overflow( * per-CPU IRQ handling contexts (thread information and stack) */ union irq_ctx { - struct thread_info tinfo; - u32 stack[THREAD_SIZE/sizeof(u32)]; + unsigned long previous_esp; + u32 stack[THREAD_SIZE/sizeof(u32)]; } __attribute__((aligned(THREAD_SIZE))); static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx); @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { - union irq_ctx *curctx, *irqctx; + union irq_ctx *irqctx; u32 *isp, arg1, arg2; - curctx = (union irq_ctx *) current_thread_info(); irqctx = __this_cpu_read(hardirq_ctx); /* @@ -87,21 +86,17 @@ execute_on_irq_stack(int overflow, struc * handler) we can't do that and just have to keep using the * current stack (which is the irq stack already after all) */ - if (unlikely(curctx == irqctx)) + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE)) return 0; /* build the stack frame on the IRQ stack */ - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); - irqctx->tinfo.task = curctx->tinfo.task; - irqctx->tinfo.previous_esp = current_stack_pointer; + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8); + irqctx->previous_esp = current_stack_pointer; + add_preempt_count(HARDIRQ_OFFSET); - /* - * Copy the softirq bits in preempt_count so that the - * softirq checks work in the hardirq context. 
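One detail of the irq_32.c rework above: with thread_info no longer stored at the base of each stack, the old curctx == irqctx comparison is replaced by a distance check against the current stack pointer. The idea, sketched in C (illustration only, invented names):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Nested interrupt?  If the live stack pointer already lies inside the
     * per-CPU IRQ stack, keep using the current stack. */
    static bool already_on_irq_stack(uintptr_t sp, uintptr_t irqctx_base,
                                     size_t thread_size)
    {
        return sp - irqctx_base < thread_size;
    }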
- */ - irqctx->tinfo.preempt_count = - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | - (curctx->tinfo.preempt_count & SOFTIRQ_MASK); +#ifdef CONFIG_PAX_MEMORY_UDEREF + __set_fs(MAKE_MM_SEG(0)); +#endif if (unlikely(overflow)) call_on_stack(print_stack_overflow, isp); @@ -113,6 +108,12 @@ execute_on_irq_stack(int overflow, struc : "0" (irq), "1" (desc), "2" (isp), "D" (desc->handle_irq) : "memory", "cc", "ecx"); + +#ifdef CONFIG_PAX_MEMORY_UDEREF + __set_fs(current_thread_info()->addr_limit); +#endif + + sub_preempt_count(HARDIRQ_OFFSET); return 1; } @@ -121,29 +122,11 @@ execute_on_irq_stack(int overflow, struc */ void __cpuinit irq_ctx_init(int cpu) { - union irq_ctx *irqctx; - if (per_cpu(hardirq_ctx, cpu)) return; - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), - THREAD_FLAGS, - THREAD_ORDER)); - memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); - irqctx->tinfo.cpu = cpu; - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); - - per_cpu(hardirq_ctx, cpu) = irqctx; - - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu), - THREAD_FLAGS, - THREAD_ORDER)); - memset(&irqctx->tinfo, 0, sizeof(struct thread_info)); - irqctx->tinfo.cpu = cpu; - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); - - per_cpu(softirq_ctx, cpu) = irqctx; + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER)); + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER)); printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); @@ -152,7 +135,6 @@ void __cpuinit irq_ctx_init(int cpu) asmlinkage void do_softirq(void) { unsigned long flags; - struct thread_info *curctx; union irq_ctx *irqctx; u32 *isp; @@ -162,15 +144,22 @@ asmlinkage void do_softirq(void) local_irq_save(flags); if (local_softirq_pending()) { - curctx = current_thread_info(); irqctx = __this_cpu_read(softirq_ctx); - irqctx->tinfo.task = curctx->task; - irqctx->tinfo.previous_esp = current_stack_pointer; + irqctx->previous_esp = current_stack_pointer; /* build the stack frame on the softirq stack */ - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8); + +#ifdef CONFIG_PAX_MEMORY_UDEREF + __set_fs(MAKE_MM_SEG(0)); +#endif call_on_stack(__do_softirq, isp); + +#ifdef CONFIG_PAX_MEMORY_UDEREF + __set_fs(current_thread_info()->addr_limit); +#endif + /* * Shouldn't happen, we returned above if in_interrupt(): */ diff -urNp linux-2.6.39.1/arch/x86/kernel/irq.c linux-2.6.39.1/arch/x86/kernel/irq.c --- linux-2.6.39.1/arch/x86/kernel/irq.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/irq.c 2011-05-22 19:36:30.000000000 -0400 @@ -17,7 +17,7 @@ #include #include -atomic_t irq_err_count; +atomic_unchecked_t irq_err_count; /* Function pointer for generic interrupt vector handling */ void (*x86_platform_ipi_callback)(void) = NULL; @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); seq_printf(p, " Machine check polls\n"); #endif - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count)); #if defined(CONFIG_X86_IO_APIC) - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count)); + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count)); #endif return 0; } @@ -158,10 +158,10 @@ u64 
arch_irq_stat_cpu(unsigned int cpu) u64 arch_irq_stat(void) { - u64 sum = atomic_read(&irq_err_count); + u64 sum = atomic_read_unchecked(&irq_err_count); #ifdef CONFIG_X86_IO_APIC - sum += atomic_read(&irq_mis_count); + sum += atomic_read_unchecked(&irq_mis_count); #endif return sum; } diff -urNp linux-2.6.39.1/arch/x86/kernel/kgdb.c linux-2.6.39.1/arch/x86/kernel/kgdb.c --- linux-2.6.39.1/arch/x86/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/kgdb.c 2011-05-22 19:36:30.000000000 -0400 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem, #ifdef CONFIG_X86_32 switch (regno) { case GDB_SS: - if (!user_mode_vm(regs)) + if (!user_mode(regs)) *(unsigned long *)mem = __KERNEL_DS; break; case GDB_SP: - if (!user_mode_vm(regs)) + if (!user_mode(regs)) *(unsigned long *)mem = kernel_stack_pointer(regs); break; case GDB_GS: @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec case 'k': /* clear the trace bit */ linux_regs->flags &= ~X86_EFLAGS_TF; - atomic_set(&kgdb_cpu_doing_single_step, -1); + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1); /* set the trace bit if we're stepping */ if (remcomInBuffer[0] == 's') { linux_regs->flags |= X86_EFLAGS_TF; - atomic_set(&kgdb_cpu_doing_single_step, + atomic_set_unchecked(&kgdb_cpu_doing_single_step, raw_smp_processor_id()); } @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args return NOTIFY_DONE; case DIE_DEBUG: - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) { if (user_mode(regs)) return single_step_cont(regs, args); break; @@ -710,7 +710,7 @@ void kgdb_arch_set_pc(struct pt_regs *re regs->ip = ip; } -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: */ .gdb_bpt_instr = { 0xcc }, .flags = KGDB_HW_BREAKPOINT, diff -urNp linux-2.6.39.1/arch/x86/kernel/kprobes.c linux-2.6.39.1/arch/x86/kernel/kprobes.c --- linux-2.6.39.1/arch/x86/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/kprobes.c 2011-05-22 19:36:30.000000000 -0400 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat } __attribute__((packed)) *insn; insn = (struct __arch_relative_insn *)from; + + pax_open_kernel(); insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); insn->op = op; + pax_close_kernel(); } /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op kprobe_opcode_t opcode; kprobe_opcode_t *orig_opcodes = opcodes; - if (search_exception_tables((unsigned long)opcodes)) + if (search_exception_tables(ktva_ktla((unsigned long)opcodes))) return 0; /* Page fault may occur on this address. */ retry: @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction( } } insn_get_length(&insn); + pax_open_kernel(); memcpy(dest, insn.kaddr, insn.length); + pax_close_kernel(); #ifdef CONFIG_X86_64 if (insn_rip_relative(&insn)) { @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction( (u8 *) dest; BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. 
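The atomic_read_unchecked()/atomic_inc_unchecked() conversions above (irq_err_count, irq_mis_count, kgdb_cpu_doing_single_step) use the patch's atomic_unchecked_t type: it behaves like atomic_t but is exempt from the PAX_REFCOUNT overflow checks, which is the right treatment for pure statistics counters that may legitimately wrap. A minimal user-space model of the idea (illustration only; the real definitions live elsewhere in the patch):

    typedef struct { volatile int counter; } atomic_unchecked_sketch_t;

    static inline void atomic_inc_unchecked_sketch(atomic_unchecked_sketch_t *v)
    {
        __sync_fetch_and_add(&v->counter, 1);   /* wraps silently, no overflow trap */
    }

    static inline int atomic_read_unchecked_sketch(const atomic_unchecked_sketch_t *v)
    {
        return v->counter;
    }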
*/ disp = (u8 *) dest + insn_offset_displacement(&insn); + pax_open_kernel(); *(s32 *) disp = (s32) newdisp; + pax_close_kernel(); } #endif return insn.length; @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s */ __copy_instruction(p->ainsn.insn, p->addr, 0); - if (can_boost(p->addr)) + if (can_boost(ktla_ktva(p->addr))) p->ainsn.boostable = 0; else p->ainsn.boostable = -1; - p->opcode = *p->addr; + p->opcode = *(ktla_ktva(p->addr)); } int __kprobes arch_prepare_kprobe(struct kprobe *p) @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s * nor set current_kprobe, because it doesn't use single * stepping. */ - regs->ip = (unsigned long)p->ainsn.insn; + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); preempt_enable_no_resched(); return; } @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s if (p->opcode == BREAKPOINT_INSTRUCTION) regs->ip = (unsigned long)p->addr; else - regs->ip = (unsigned long)p->ainsn.insn; + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); } /* @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru setup_singlestep(p, regs, kcb, 0); return 1; } - } else if (*addr != BREAKPOINT_INSTRUCTION) { + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) { /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s struct pt_regs *regs, struct kprobe_ctlblk *kcb) { unsigned long *tos = stack_addr(regs); - unsigned long copy_ip = (unsigned long)p->ainsn.insn; + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn); unsigned long orig_ip = (unsigned long)p->addr; kprobe_opcode_t *insn = p->ainsn.insn; @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s struct die_args *args = data; int ret = NOTIFY_DONE; - if (args->regs && user_mode_vm(args->regs)) + if (args->regs && user_mode(args->regs)) return ret; switch (val) { @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr * Verify if the address gap is in 2GB range, because this uses * a relative jump. 
*/ - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE; if (abs(rel) > 0x7fffffff) return -ERANGE; @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op); /* Set probe function call */ - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback); + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback)); /* Set returning jmp instruction at the tail of out-of-line buffer */ synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, - (u8 *)op->kp.addr + op->optinsn.size); + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size); flush_icache_range((unsigned long) buf, (unsigned long) buf + TMPL_END_IDX + @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr ((long)op->kp.addr + RELATIVEJUMP_SIZE)); /* Backup instructions which will be replaced by jump address */ - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE, RELATIVE_ADDR_SIZE); insn_buf[0] = RELATIVEJUMP_OPCODE; diff -urNp linux-2.6.39.1/arch/x86/kernel/ldt.c linux-2.6.39.1/arch/x86/kernel/ldt.c --- linux-2.6.39.1/arch/x86/kernel/ldt.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/ldt.c 2011-05-22 19:36:30.000000000 -0400 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i if (reload) { #ifdef CONFIG_SMP preempt_disable(); - load_LDT(pc); + load_LDT_nolock(pc); if (!cpumask_equal(mm_cpumask(current->mm), cpumask_of(smp_processor_id()))) smp_call_function(flush_ldt, current->mm, 1); preempt_enable(); #else - load_LDT(pc); + load_LDT_nolock(pc); #endif } if (oldsize) { @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t return err; for (i = 0; i < old->size; i++) - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); + write_ldt_entry(new->ldt, i, old->ldt + i); return 0; } @@ -116,6 +116,24 @@ int init_new_context(struct task_struct retval = copy_ldt(&mm->context, &old_mm->context); mutex_unlock(&old_mm->context.lock); } + + if (tsk == current) { + mm->context.vdso = 0; + +#ifdef CONFIG_X86_32 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + mm->context.user_cs_base = 0UL; + mm->context.user_cs_limit = ~0UL; + +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) + cpus_clear(mm->context.cpu_user_cs_mask); +#endif + +#endif +#endif + + } + return retval; } @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u } } +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) { + error = -EINVAL; + goto out_unlock; + } +#endif + fill_ldt(&ldt, &ldt_info); if (oldmode) ldt.avl = 0; diff -urNp linux-2.6.39.1/arch/x86/kernel/machine_kexec_32.c linux-2.6.39.1/arch/x86/kernel/machine_kexec_32.c --- linux-2.6.39.1/arch/x86/kernel/machine_kexec_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/machine_kexec_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -27,7 +27,7 @@ #include #include -static void set_idt(void *newidt, __u16 limit) +static void set_idt(struct desc_struct *newidt, __u16 limit) { struct desc_ptr curidt; @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 } -static void set_gdt(void *newgdt, __u16 limit) +static void set_gdt(struct desc_struct *newgdt, __u16 limit) { struct desc_ptr curgdt; @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image) } control_page = page_address(image->control_code_page); 
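The ktla_ktva()/ktva_ktla() conversions scattered through the ftrace, kprobes and kexec hunks translate between the two views of kernel text that exist once KERNEXEC shifts the i386 code segment: the address the code is linked and mapped at versus the address it is actually fetched from. Roughly (illustration only; the offset shown is a placeholder, not the real value, and on configurations without the split the macros are the identity):

    #define KTEXT_OFFSET_SKETCH 0UL   /* placeholder for the real text offset */

    #define ktla_ktva_sketch(addr) ((addr) + KTEXT_OFFSET_SKETCH)
    #define ktva_ktla_sketch(addr) ((addr) - KTEXT_OFFSET_SKETCH)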
- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE); + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE); relocate_kernel_ptr = control_page; page_list[PA_CONTROL_PAGE] = __pa(control_page); diff -urNp linux-2.6.39.1/arch/x86/kernel/microcode_amd.c linux-2.6.39.1/arch/x86/kernel/microcode_amd.c --- linux-2.6.39.1/arch/x86/kernel/microcode_amd.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/microcode_amd.c 2011-05-22 19:36:30.000000000 -0400 @@ -339,7 +339,7 @@ static void microcode_fini_cpu_amd(int c uci->mc = NULL; } -static struct microcode_ops microcode_amd_ops = { +static const struct microcode_ops microcode_amd_ops = { .request_microcode_user = request_microcode_user, .request_microcode_fw = request_microcode_amd, .collect_cpu_info = collect_cpu_info_amd, @@ -347,7 +347,7 @@ static struct microcode_ops microcode_am .microcode_fini_cpu = microcode_fini_cpu_amd, }; -struct microcode_ops * __init init_amd_microcode(void) +const struct microcode_ops * __init init_amd_microcode(void) { return &microcode_amd_ops; } diff -urNp linux-2.6.39.1/arch/x86/kernel/microcode_core.c linux-2.6.39.1/arch/x86/kernel/microcode_core.c --- linux-2.6.39.1/arch/x86/kernel/microcode_core.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/microcode_core.c 2011-05-22 19:36:30.000000000 -0400 @@ -93,7 +93,7 @@ MODULE_LICENSE("GPL"); #define MICROCODE_VERSION "2.00" -static struct microcode_ops *microcode_ops; +static const struct microcode_ops *microcode_ops; /* * Synchronization. diff -urNp linux-2.6.39.1/arch/x86/kernel/microcode_intel.c linux-2.6.39.1/arch/x86/kernel/microcode_intel.c --- linux-2.6.39.1/arch/x86/kernel/microcode_intel.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/microcode_intel.c 2011-05-22 19:36:30.000000000 -0400 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod static int get_ucode_user(void *to, const void *from, size_t n) { - return copy_from_user(to, from, n); + return copy_from_user(to, (__force const void __user *)from, n); } static enum ucode_state request_microcode_user(int cpu, const void __user *buf, size_t size) { - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user); + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user); } static void microcode_fini_cpu(int cpu) @@ -457,7 +457,7 @@ static void microcode_fini_cpu(int cpu) uci->mc = NULL; } -static struct microcode_ops microcode_intel_ops = { +static const struct microcode_ops microcode_intel_ops = { .request_microcode_user = request_microcode_user, .request_microcode_fw = request_microcode_fw, .collect_cpu_info = collect_cpu_info, @@ -465,7 +465,7 @@ static struct microcode_ops microcode_in .microcode_fini_cpu = microcode_fini_cpu, }; -struct microcode_ops * __init init_intel_microcode(void) +const struct microcode_ops * __init init_intel_microcode(void) { return &microcode_intel_ops; } diff -urNp linux-2.6.39.1/arch/x86/kernel/module.c linux-2.6.39.1/arch/x86/kernel/module.c --- linux-2.6.39.1/arch/x86/kernel/module.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/module.c 2011-05-22 19:36:30.000000000 -0400 @@ -35,21 +35,66 @@ #define DEBUGP(fmt...)
#endif -void *module_alloc(unsigned long size) +static inline void *__module_alloc(unsigned long size, pgprot_t prot) { if (PAGE_ALIGN(size) > MODULES_LEN) return NULL; return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot, -1, __builtin_return_address(0)); } +void *module_alloc(unsigned long size) +{ + +#ifdef CONFIG_PAX_KERNEXEC + return __module_alloc(size, PAGE_KERNEL); +#else + return __module_alloc(size, PAGE_KERNEL_EXEC); +#endif + +} + /* Free memory returned from module_alloc */ void module_free(struct module *mod, void *module_region) { vfree(module_region); } +#ifdef CONFIG_PAX_KERNEXEC +#ifdef CONFIG_X86_32 +void *module_alloc_exec(unsigned long size) +{ + struct vm_struct *area; + + if (size == 0) + return NULL; + + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END); + return area ? area->addr : NULL; +} +EXPORT_SYMBOL(module_alloc_exec); + +void module_free_exec(struct module *mod, void *module_region) +{ + vunmap(module_region); +} +EXPORT_SYMBOL(module_free_exec); +#else +void module_free_exec(struct module *mod, void *module_region) +{ + module_free(mod, module_region); +} +EXPORT_SYMBOL(module_free_exec); + +void *module_alloc_exec(unsigned long size) +{ + return __module_alloc(size, PAGE_KERNEL_RX); +} +EXPORT_SYMBOL(module_alloc_exec); +#endif +#endif + /* We don't need anything special. */ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, @@ -69,14 +114,16 @@ int apply_relocate(Elf32_Shdr *sechdrs, unsigned int i; Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr; Elf32_Sym *sym; - uint32_t *location; + uint32_t *plocation, location; DEBUGP("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr - + rel[i].r_offset; + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; + location = (uint32_t)plocation; + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR) + plocation = ktla_ktva((void *)plocation); /* This is the symbol it is referring to. Note that all undefined symbols have been resolved. 
*/ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr @@ -85,11 +132,15 @@ int apply_relocate(Elf32_Shdr *sechdrs, switch (ELF32_R_TYPE(rel[i].r_info)) { case R_386_32: /* We add the value into the location given */ - *location += sym->st_value; + pax_open_kernel(); + *plocation += sym->st_value; + pax_close_kernel(); break; case R_386_PC32: /* Add the value, subtract its postition */ - *location += sym->st_value - (uint32_t)location; + pax_open_kernel(); + *plocation += sym->st_value - location; + pax_close_kernel(); break; default: printk(KERN_ERR "module %s: Unknown relocation: %u\n", @@ -145,21 +196,30 @@ int apply_relocate_add(Elf64_Shdr *sechd case R_X86_64_NONE: break; case R_X86_64_64: + pax_open_kernel(); *(u64 *)loc = val; + pax_close_kernel(); break; case R_X86_64_32: + pax_open_kernel(); *(u32 *)loc = val; + pax_close_kernel(); if (val != *(u32 *)loc) goto overflow; break; case R_X86_64_32S: + pax_open_kernel(); *(s32 *)loc = val; + pax_close_kernel(); if ((s64)val != *(s32 *)loc) goto overflow; break; case R_X86_64_PC32: val -= (u64)loc; + pax_open_kernel(); *(u32 *)loc = val; + pax_close_kernel(); + #if 0 if ((s64)val != *(s32 *)loc) goto overflow; diff -urNp linux-2.6.39.1/arch/x86/kernel/paravirt.c linux-2.6.39.1/arch/x86/kernel/paravirt.c --- linux-2.6.39.1/arch/x86/kernel/paravirt.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/paravirt.c 2011-05-22 19:36:30.000000000 -0400 @@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu * corresponding structure. */ static void *get_call_destination(u8 type) { - struct paravirt_patch_template tmpl = { + const struct paravirt_patch_template tmpl = { .pv_init_ops = pv_init_ops, .pv_time_ops = pv_time_ops, .pv_cpu_ops = pv_cpu_ops, @@ -133,6 +133,9 @@ static void *get_call_destination(u8 typ .pv_lock_ops = pv_lock_ops, #endif }; + + pax_track_stack(); + return *((void **)&tmpl + type); } @@ -145,14 +148,14 @@ unsigned paravirt_patch_default(u8 type, if (opfunc == NULL) /* If there's no function, patch it with a ud2a (BUG) */ ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a)); - else if (opfunc == _paravirt_nop) + else if (opfunc == (void *)_paravirt_nop) /* If the operation is a nop, then nop the callsite */ ret = paravirt_patch_nop(); /* identity functions just return their single argument */ - else if (opfunc == _paravirt_ident_32) + else if (opfunc == (void *)_paravirt_ident_32) ret = paravirt_patch_ident_32(insnbuf, len); - else if (opfunc == _paravirt_ident_64) + else if (opfunc == (void *)_paravirt_ident_64) ret = paravirt_patch_ident_64(insnbuf, len); else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || @@ -178,7 +181,7 @@ unsigned paravirt_patch_insns(void *insn if (insn_len > len || start == NULL) insn_len = len; else - memcpy(insnbuf, start, insn_len); + memcpy(insnbuf, ktla_ktva(start), insn_len); return insn_len; } @@ -294,22 +297,22 @@ void arch_flush_lazy_mmu_mode(void) preempt_enable(); } -struct pv_info pv_info = { +struct pv_info pv_info __read_only = { .name = "bare hardware", .paravirt_enabled = 0, .kernel_rpl = 0, .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */ }; -struct pv_init_ops pv_init_ops = { +struct pv_init_ops pv_init_ops __read_only = { .patch = native_patch, }; -struct pv_time_ops pv_time_ops = { +struct pv_time_ops pv_time_ops __read_only = { .sched_clock = native_sched_clock, }; -struct pv_irq_ops pv_irq_ops = { +struct pv_irq_ops pv_irq_ops __read_only = { .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), .restore_fl = 
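A pattern worth noting in the module.c relocation hunks above: every write into what may now be read-only module or kernel text is bracketed by pax_open_kernel()/pax_close_kernel(). The pair briefly lifts the write protection (for instance by toggling CR0.WP or switching to a writable alias) and restores it right after the store. Schematically (illustration only, kernel-context sketch):

    /* Sketch of the bracketing used around the relocation stores above. */
    static void apply_one_reloc_sketch(unsigned int *location, unsigned int addend)
    {
        pax_open_kernel();      /* provided by the PaX patch */
        *location += addend;    /* e.g. the R_386_32 fix-up   */
        pax_close_kernel();
    }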
__PV_IS_CALLEE_SAVE(native_restore_fl), .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), @@ -321,7 +324,7 @@ struct pv_irq_ops pv_irq_ops = { #endif }; -struct pv_cpu_ops pv_cpu_ops = { +struct pv_cpu_ops pv_cpu_ops __read_only = { .cpuid = native_cpuid, .get_debugreg = native_get_debugreg, .set_debugreg = native_set_debugreg, @@ -382,7 +385,7 @@ struct pv_cpu_ops pv_cpu_ops = { .end_context_switch = paravirt_nop, }; -struct pv_apic_ops pv_apic_ops = { +struct pv_apic_ops pv_apic_ops __read_only = { #ifdef CONFIG_X86_LOCAL_APIC .startup_ipi_hook = paravirt_nop, #endif @@ -396,7 +399,7 @@ struct pv_apic_ops pv_apic_ops = { #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) #endif -struct pv_mmu_ops pv_mmu_ops = { +struct pv_mmu_ops pv_mmu_ops __read_only = { .read_cr2 = native_read_cr2, .write_cr2 = native_write_cr2, @@ -465,6 +468,12 @@ struct pv_mmu_ops pv_mmu_ops = { }, .set_fixmap = native_set_fixmap, + +#ifdef CONFIG_PAX_KERNEXEC + .pax_open_kernel = native_pax_open_kernel, + .pax_close_kernel = native_pax_close_kernel, +#endif + }; EXPORT_SYMBOL_GPL(pv_time_ops); diff -urNp linux-2.6.39.1/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.39.1/arch/x86/kernel/paravirt-spinlocks.c --- linux-2.6.39.1/arch/x86/kernel/paravirt-spinlocks.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/paravirt-spinlocks.c 2011-05-22 19:36:30.000000000 -0400 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t arch_spin_lock(lock); } -struct pv_lock_ops pv_lock_ops = { +struct pv_lock_ops pv_lock_ops __read_only = { #ifdef CONFIG_SMP .spin_is_locked = __ticket_spin_is_locked, .spin_is_contended = __ticket_spin_is_contended, diff -urNp linux-2.6.39.1/arch/x86/kernel/pci-calgary_64.c linux-2.6.39.1/arch/x86/kernel/pci-calgary_64.c --- linux-2.6.39.1/arch/x86/kernel/pci-calgary_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/pci-calgary_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -179,13 +179,13 @@ static void calioc2_dump_error_regs(stru static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl); static void get_tce_space_from_tar(void); -static struct cal_chipset_ops calgary_chip_ops = { +static const struct cal_chipset_ops calgary_chip_ops = { .handle_quirks = calgary_handle_quirks, .tce_cache_blast = calgary_tce_cache_blast, .dump_error_regs = calgary_dump_error_regs }; -static struct cal_chipset_ops calioc2_chip_ops = { +static const struct cal_chipset_ops calioc2_chip_ops = { .handle_quirks = calioc2_handle_quirks, .tce_cache_blast = calioc2_tce_cache_blast, .dump_error_regs = calioc2_dump_error_regs @@ -476,7 +476,7 @@ static void calgary_free_coherent(struct free_pages((unsigned long)vaddr, get_order(size)); } -static struct dma_map_ops calgary_dma_ops = { +static const struct dma_map_ops calgary_dma_ops = { .alloc_coherent = calgary_alloc_coherent, .free_coherent = calgary_free_coherent, .map_sg = calgary_map_sg, diff -urNp linux-2.6.39.1/arch/x86/kernel/pci-dma.c linux-2.6.39.1/arch/x86/kernel/pci-dma.c --- linux-2.6.39.1/arch/x86/kernel/pci-dma.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/pci-dma.c 2011-05-22 19:36:30.000000000 -0400 @@ -16,7 +16,7 @@ static int forbid_dac __read_mostly; -struct dma_map_ops *dma_ops = &nommu_dma_ops; +const struct dma_map_ops *dma_ops = &nommu_dma_ops; EXPORT_SYMBOL(dma_ops); static int iommu_sac_force __read_mostly; @@ -250,7 +250,7 @@ early_param("iommu", iommu_setup); int dma_supported(struct device *dev, u64 mask) { - struct dma_map_ops *ops = 
get_dma_ops(dev); + const struct dma_map_ops *ops = get_dma_ops(dev); #ifdef CONFIG_PCI if (mask > 0xffffffff && forbid_dac > 0) { diff -urNp linux-2.6.39.1/arch/x86/kernel/pci-gart_64.c linux-2.6.39.1/arch/x86/kernel/pci-gart_64.c --- linux-2.6.39.1/arch/x86/kernel/pci-gart_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/pci-gart_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -695,7 +695,7 @@ static __init int init_amd_gatt(struct a return -1; } -static struct dma_map_ops gart_dma_ops = { +static const struct dma_map_ops gart_dma_ops = { .map_sg = gart_map_sg, .unmap_sg = gart_unmap_sg, .map_page = gart_map_page, diff -urNp linux-2.6.39.1/arch/x86/kernel/pci-iommu_table.c linux-2.6.39.1/arch/x86/kernel/pci-iommu_table.c --- linux-2.6.39.1/arch/x86/kernel/pci-iommu_table.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/pci-iommu_table.c 2011-05-22 19:36:30.000000000 -0400 @@ -2,7 +2,7 @@ #include #include #include - +#include #define DEBUG 1 @@ -53,6 +53,8 @@ void __init check_iommu_entries(struct i char sym_p[KSYM_SYMBOL_LEN]; char sym_q[KSYM_SYMBOL_LEN]; + pax_track_stack(); + /* Simple cyclic dependency checker. */ for (p = start; p < finish; p++) { q = find_dependents_of(start, finish, p); diff -urNp linux-2.6.39.1/arch/x86/kernel/pci-nommu.c linux-2.6.39.1/arch/x86/kernel/pci-nommu.c --- linux-2.6.39.1/arch/x86/kernel/pci-nommu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/pci-nommu.c 2011-05-22 19:36:30.000000000 -0400 @@ -95,7 +95,7 @@ static void nommu_sync_sg_for_device(str flush_write_buffers(); } -struct dma_map_ops nommu_dma_ops = { +const struct dma_map_ops nommu_dma_ops = { .alloc_coherent = dma_generic_alloc_coherent, .free_coherent = nommu_free_coherent, .map_sg = nommu_map_sg, diff -urNp linux-2.6.39.1/arch/x86/kernel/pci-swiotlb.c linux-2.6.39.1/arch/x86/kernel/pci-swiotlb.c --- linux-2.6.39.1/arch/x86/kernel/pci-swiotlb.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/pci-swiotlb.c 2011-05-22 19:36:30.000000000 -0400 @@ -26,7 +26,7 @@ static void *x86_swiotlb_alloc_coherent( return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); } -static struct dma_map_ops swiotlb_dma_ops = { +static const struct dma_map_ops swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, .alloc_coherent = x86_swiotlb_alloc_coherent, .free_coherent = swiotlb_free_coherent, diff -urNp linux-2.6.39.1/arch/x86/kernel/process_32.c linux-2.6.39.1/arch/x86/kernel/process_32.c --- linux-2.6.39.1/arch/x86/kernel/process_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/process_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as unsigned long thread_saved_pc(struct task_struct *tsk) { return ((unsigned long *)tsk->thread.sp)[3]; +//XXX return tsk->thread.eip; } #ifndef CONFIG_SMP @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i unsigned long sp; unsigned short ss, gs; - if (user_mode_vm(regs)) { + if (user_mode(regs)) { sp = regs->sp; ss = regs->ss & 0xffff; - gs = get_user_gs(regs); } else { sp = kernel_stack_pointer(regs); savesegment(ss, ss); - savesegment(gs, gs); } + gs = get_user_gs(regs); show_regs_common(); @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag struct task_struct *tsk; int err; - childregs = task_pt_regs(p); + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8; *childregs = *regs; childregs->ax = 0; childregs->sp = sp; p->thread.sp = (unsigned long) 
childregs; p->thread.sp0 = (unsigned long) (childregs+1); + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p); p->thread.ip = (unsigned long) ret_from_fork; @@ -293,7 +294,7 @@ __switch_to(struct task_struct *prev_p, struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); + struct tss_struct *tss = init_tss + cpu; bool preload_fpu; /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ @@ -328,6 +329,10 @@ __switch_to(struct task_struct *prev_p, */ lazy_save_gs(prev->gs); +#ifdef CONFIG_PAX_MEMORY_UDEREF + __set_fs(task_thread_info(next_p)->addr_limit); +#endif + /* * Load the per-thread Thread-Local Storage descriptor. */ @@ -363,6 +368,9 @@ __switch_to(struct task_struct *prev_p, */ arch_end_context_switch(next_p); + percpu_write(current_task, next_p); + percpu_write(current_tinfo, &next_p->tinfo); + if (preload_fpu) __math_state_restore(); @@ -372,8 +380,6 @@ __switch_to(struct task_struct *prev_p, if (prev->gs | next->gs) lazy_load_gs(next->gs); - percpu_write(current_task, next_p); - return prev_p; } @@ -403,4 +409,3 @@ unsigned long get_wchan(struct task_stru } while (count++ < 16); return 0; } - diff -urNp linux-2.6.39.1/arch/x86/kernel/process_64.c linux-2.6.39.1/arch/x86/kernel/process_64.c --- linux-2.6.39.1/arch/x86/kernel/process_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/process_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -87,7 +87,7 @@ static void __exit_idle(void) void exit_idle(void) { /* idle loop has pid 0 */ - if (current->pid) + if (task_pid_nr(current)) return; __exit_idle(); } @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag struct pt_regs *childregs; struct task_struct *me = current; - childregs = ((struct pt_regs *) - (THREAD_SIZE + task_stack_page(p))) - 1; + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16; *childregs = *regs; childregs->ax = 0; @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag p->thread.sp = (unsigned long) childregs; p->thread.sp0 = (unsigned long) (childregs+1); p->thread.usersp = me->thread.usersp; + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p); set_tsk_thread_flag(p, TIF_FORK); @@ -376,7 +376,7 @@ __switch_to(struct task_struct *prev_p, struct thread_struct *prev = &prev_p->thread; struct thread_struct *next = &next_p->thread; int cpu = smp_processor_id(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); + struct tss_struct *tss = init_tss + cpu; unsigned fsindex, gsindex; bool preload_fpu; @@ -472,10 +472,9 @@ __switch_to(struct task_struct *prev_p, prev->usersp = percpu_read(old_rsp); percpu_write(old_rsp, next->usersp); percpu_write(current_task, next_p); + percpu_write(current_tinfo, &next_p->tinfo); - percpu_write(kernel_stack, - (unsigned long)task_stack_page(next_p) + - THREAD_SIZE - KERNEL_STACK_OFFSET); + percpu_write(kernel_stack, next->sp0); /* * Now maybe reload the debug registers and handle I/O bitmaps @@ -537,12 +536,11 @@ unsigned long get_wchan(struct task_stru if (!p || p == current || p->state == TASK_RUNNING) return 0; stack = (unsigned long)task_stack_page(p); - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64)) return 0; fp = *(u64 *)(p->thread.sp); do { - if (fp < (unsigned long)stack || - fp >= (unsigned long)stack+THREAD_SIZE) + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64)) return 0; ip = *(u64 
*)(fp+8); if (!in_sched_functions(ip)) diff -urNp linux-2.6.39.1/arch/x86/kernel/process.c linux-2.6.39.1/arch/x86/kernel/process.c --- linux-2.6.39.1/arch/x86/kernel/process.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/process.c 2011-05-22 19:36:30.000000000 -0400 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru void free_thread_info(struct thread_info *ti) { - free_thread_xstate(ti->task); free_pages((unsigned long)ti, get_order(THREAD_SIZE)); } +static struct kmem_cache *task_struct_cachep; + void arch_task_cache_init(void) { - task_xstate_cachep = - kmem_cache_create("task_xstate", xstate_size, + /* create a slab on which task_structs can be allocated */ + task_struct_cachep = + kmem_cache_create("task_struct", sizeof(struct task_struct), + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL); + + task_xstate_cachep = + kmem_cache_create("task_xstate", xstate_size, __alignof__(union thread_xstate), - SLAB_PANIC | SLAB_NOTRACK, NULL); + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL); +} + +struct task_struct *alloc_task_struct_node(int node) +{ + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); +} + +void free_task_struct(struct task_struct *task) +{ + free_thread_xstate(task); + kmem_cache_free(task_struct_cachep, task); } /* @@ -70,7 +87,7 @@ void exit_thread(void) unsigned long *bp = t->io_bitmap_ptr; if (bp) { - struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); + struct tss_struct *tss = init_tss + get_cpu(); t->io_bitmap_ptr = NULL; clear_thread_flag(TIF_IO_BITMAP); @@ -106,7 +123,7 @@ void show_regs_common(void) printk(KERN_CONT "\n"); printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s", - current->pid, current->comm, print_tainted(), + task_pid_nr(current), current->comm, print_tainted(), init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); @@ -120,6 +137,9 @@ void flush_thread(void) { struct task_struct *tsk = current; +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF) + loadsegment(gs, 0); +#endif flush_ptrace_hw_breakpoint(tsk); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); /* @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi regs.di = (unsigned long) arg; #ifdef CONFIG_X86_32 - regs.ds = __USER_DS; - regs.es = __USER_DS; + regs.ds = __KERNEL_DS; + regs.es = __KERNEL_DS; regs.fs = __KERNEL_PERCPU; - regs.gs = __KERNEL_STACK_CANARY; + savesegment(gs, regs.gs); #else regs.ss = __KERNEL_DS; #endif @@ -401,7 +421,7 @@ void default_idle(void) EXPORT_SYMBOL(default_idle); #endif -void stop_this_cpu(void *dummy) +__noreturn void stop_this_cpu(void *dummy) { local_irq_disable(); /* @@ -665,16 +685,34 @@ static int __init idle_setup(char *str) } early_param("idle", idle_setup); -unsigned long arch_align_stack(unsigned long sp) +#ifdef CONFIG_PAX_RANDKSTACK +asmlinkage void pax_randomize_kstack(void) { - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) - sp -= get_random_int() % 8192; - return sp & ~0xf; -} + struct thread_struct *thread = &current->thread; + unsigned long time; -unsigned long arch_randomize_brk(struct mm_struct *mm) -{ - unsigned long range_end = mm->brk + 0x02000000; - return randomize_range(mm->brk, range_end, 0) ?
: mm->brk; -} + if (!randomize_va_space) + return; + + rdtscl(time); + + /* P4 seems to return a 0 LSB, ignore it */ +#ifdef CONFIG_MPENTIUM4 + time &= 0x3EUL; + time <<= 2; +#elif defined(CONFIG_X86_64) + time &= 0xFUL; + time <<= 4; +#else + time &= 0x1FUL; + time <<= 3; +#endif + + thread->sp0 ^= time; + load_sp0(init_tss + smp_processor_id(), thread); +#ifdef CONFIG_X86_64 + percpu_write(kernel_stack, thread->sp0); +#endif +} +#endif diff -urNp linux-2.6.39.1/arch/x86/kernel/ptrace.c linux-2.6.39.1/arch/x86/kernel/ptrace.c --- linux-2.6.39.1/arch/x86/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/ptrace.c 2011-05-22 19:36:30.000000000 -0400 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi unsigned long addr, unsigned long data) { int ret; - unsigned long __user *datap = (unsigned long __user *)data; + unsigned long __user *datap = (__force unsigned long __user *)data; switch (request) { /* read the word at location addr in the USER area. */ @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi if ((int) addr < 0) return -EIO; ret = do_get_thread_area(child, addr, - (struct user_desc __user *)data); + (__force struct user_desc __user *) data); break; case PTRACE_SET_THREAD_AREA: if ((int) addr < 0) return -EIO; ret = do_set_thread_area(child, addr, - (struct user_desc __user *)data, 0); + (__force struct user_desc __user *) data, 0); break; #endif @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas memset(info, 0, sizeof(*info)); info->si_signo = SIGTRAP; info->si_code = si_code; - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL; + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL; } void user_single_step_siginfo(struct task_struct *tsk, @@ -1363,7 +1363,7 @@ void send_sigtrap(struct task_struct *ts * We must return the syscall number to actually look up in the table. * This can be -1L to skip running any syscall at all. */ -asmregparm long syscall_trace_enter(struct pt_regs *regs) +long syscall_trace_enter(struct pt_regs *regs) { long ret = 0; @@ -1408,7 +1408,7 @@ asmregparm long syscall_trace_enter(stru return ret ?: regs->orig_ax; } -asmregparm void syscall_trace_leave(struct pt_regs *regs) +void syscall_trace_leave(struct pt_regs *regs) { bool step; diff -urNp linux-2.6.39.1/arch/x86/kernel/pvclock.c linux-2.6.39.1/arch/x86/kernel/pvclock.c --- linux-2.6.39.1/arch/x86/kernel/pvclock.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/pvclock.c 2011-05-22 19:36:30.000000000 -0400 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc return pv_tsc_khz; } -static atomic64_t last_value = ATOMIC64_INIT(0); +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0); void pvclock_resume(void) { - atomic64_set(&last_value, 0); + atomic64_set_unchecked(&last_value, 0); } cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct * updating at the same time, and one of them could be slightly behind, * making the assumption that last_value always go forward fail to hold. 
*/ - last = atomic64_read(&last_value); + last = atomic64_read_unchecked(&last_value); do { if (ret < last) return last; - last = atomic64_cmpxchg(&last_value, last, ret); + last = atomic64_cmpxchg_unchecked(&last_value, last, ret); } while (unlikely(last != ret)); return ret; diff -urNp linux-2.6.39.1/arch/x86/kernel/reboot.c linux-2.6.39.1/arch/x86/kernel/reboot.c --- linux-2.6.39.1/arch/x86/kernel/reboot.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/reboot.c 2011-05-23 17:07:00.000000000 -0400 @@ -35,7 +35,7 @@ void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); static const struct desc_ptr no_idt = {}; -static int reboot_mode; +static unsigned short reboot_mode; enum reboot_type reboot_type = BOOT_KBD; int reboot_force; @@ -307,13 +307,17 @@ core_initcall(reboot_init); extern const unsigned char machine_real_restart_asm[]; extern const u64 machine_real_restart_gdt[3]; -void machine_real_restart(unsigned int type) +__noreturn void machine_real_restart(unsigned int type) { void *restart_va; unsigned long restart_pa; - void (*restart_lowmem)(unsigned int); + void (* __noreturn restart_lowmem)(unsigned int); u64 *lowmem_gdt; +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)) + struct desc_struct *gdt; +#endif + local_irq_disable(); /* Write zero to CMOS register number 0x0f, which the BIOS POST @@ -339,14 +343,14 @@ void machine_real_restart(unsigned int t boot)". This seems like a fairly standard thing that gets set by REBOOT.COM programs, and the previous reset routine did this too. */ - *((unsigned short *)0x472) = reboot_mode; + *(unsigned short *)(__va(0x472)) = reboot_mode; /* Patch the GDT in the low memory trampoline */ lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt); restart_va = TRAMPOLINE_SYM(machine_real_restart_asm); restart_pa = virt_to_phys(restart_va); - restart_lowmem = (void (*)(unsigned int))restart_pa; + restart_lowmem = (void *)restart_pa; /* GDT[0]: GDT self-pointer */ lowmem_gdt[0] = @@ -357,7 +361,33 @@ void machine_real_restart(unsigned int t GDT_ENTRY(0x009b, restart_pa, 0xffff); /* Jump to the identity-mapped low memory code */ + +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)) + gdt = get_cpu_gdt_table(smp_processor_id()); + pax_open_kernel(); +#ifdef CONFIG_PAX_MEMORY_UDEREF + gdt[GDT_ENTRY_KERNEL_DS].type = 3; + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf; + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory"); +#endif +#ifdef CONFIG_PAX_KERNEXEC + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0; + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0; + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0; + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff; + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf; + gdt[GDT_ENTRY_KERNEL_CS].g = 1; +#endif + pax_close_kernel(); +#endif + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type)); + unreachable(); +#else restart_lowmem(type); +#endif + } #ifdef CONFIG_APM_MODULE EXPORT_SYMBOL(machine_real_restart); @@ -478,7 +508,7 @@ void __attribute__((weak)) mach_reboot_f { } -static void native_machine_emergency_restart(void) +__noreturn static void native_machine_emergency_restart(void) { int i; @@ -593,13 +623,13 @@ void native_machine_shutdown(void) #endif } -static void __machine_emergency_restart(int emergency) +static __noreturn void __machine_emergency_restart(int emergency) { reboot_emergency = emergency; 
machine_ops.emergency_restart(); } -static void native_machine_restart(char *__unused) +static __noreturn void native_machine_restart(char *__unused) { printk("machine restart\n"); @@ -608,7 +638,7 @@ static void native_machine_restart(char __machine_emergency_restart(0); } -static void native_machine_halt(void) +static __noreturn void native_machine_halt(void) { /* stop other cpus and apics */ machine_shutdown(); @@ -619,7 +649,7 @@ static void native_machine_halt(void) stop_this_cpu(NULL); } -static void native_machine_power_off(void) +__noreturn static void native_machine_power_off(void) { if (pm_power_off) { if (!reboot_force) @@ -628,6 +658,7 @@ static void native_machine_power_off(voi } /* a fallback in case there is no PM info available */ tboot_shutdown(TB_SHUTDOWN_HALT); + unreachable(); } struct machine_ops machine_ops = { diff -urNp linux-2.6.39.1/arch/x86/kernel/setup.c linux-2.6.39.1/arch/x86/kernel/setup.c --- linux-2.6.39.1/arch/x86/kernel/setup.c 2011-06-03 00:04:13.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/setup.c 2011-06-03 00:32:04.000000000 -0400 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void) * area (640->1Mb) as ram even though it is not. * take them out. */ - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1); sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); } @@ -775,14 +775,14 @@ void __init setup_arch(char **cmdline_p) if (!boot_params.hdr.root_flags) root_mountflags &= ~MS_RDONLY; - init_mm.start_code = (unsigned long) _text; - init_mm.end_code = (unsigned long) _etext; + init_mm.start_code = ktla_ktva((unsigned long) _text); + init_mm.end_code = ktla_ktva((unsigned long) _etext); init_mm.end_data = (unsigned long) _edata; init_mm.brk = _brk_end; - code_resource.start = virt_to_phys(_text); - code_resource.end = virt_to_phys(_etext)-1; - data_resource.start = virt_to_phys(_etext); + code_resource.start = virt_to_phys(ktla_ktva(_text)); + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1; + data_resource.start = virt_to_phys(_sdata); data_resource.end = virt_to_phys(_edata)-1; bss_resource.start = virt_to_phys(&__bss_start); bss_resource.end = virt_to_phys(&__bss_stop)-1; diff -urNp linux-2.6.39.1/arch/x86/kernel/setup_percpu.c linux-2.6.39.1/arch/x86/kernel/setup_percpu.c --- linux-2.6.39.1/arch/x86/kernel/setup_percpu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/setup_percpu.c 2011-06-04 20:08:30.000000000 -0400 @@ -21,19 +21,17 @@ #include #include -DEFINE_PER_CPU(int, cpu_number); +#ifdef CONFIG_SMP +DEFINE_PER_CPU(unsigned int, cpu_number); EXPORT_PER_CPU_SYMBOL(cpu_number); +#endif -#ifdef CONFIG_X86_64 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load) -#else -#define BOOT_PERCPU_OFFSET 0 -#endif DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET; EXPORT_PER_CPU_SYMBOL(this_cpu_off); -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { +unsigned long __per_cpu_offset[NR_CPUS] __read_only = { [0 ... 
NR_CPUS-1] = BOOT_PERCPU_OFFSET, }; EXPORT_SYMBOL(__per_cpu_offset); @@ -155,10 +153,10 @@ static inline void setup_percpu_segment( { #ifdef CONFIG_X86_32 struct desc_struct gdt; + unsigned long base = per_cpu_offset(cpu); - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF, - 0x2 | DESCTYPE_S, 0x8); - gdt.s = 1; + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT, + 0x83 | DESCTYPE_S, 0xC); write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); #endif @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void) /* alrighty, percpu areas up and running */ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) { +#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_X86_32 + unsigned long canary = per_cpu(stack_canary.canary, cpu); +#endif +#endif per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); per_cpu(cpu_number, cpu) = cpu; @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void) */ set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); #endif +#ifdef CONFIG_CC_STACKPROTECTOR +#ifdef CONFIG_X86_32 + if (!cpu) + per_cpu(stack_canary.canary, cpu) = canary; +#endif +#endif /* * Up to this point, the boot CPU has been using .init.data * area. Reload any changed state for the boot CPU. diff -urNp linux-2.6.39.1/arch/x86/kernel/signal.c linux-2.6.39.1/arch/x86/kernel/signal.c --- linux-2.6.39.1/arch/x86/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/signal.c 2011-05-23 17:07:00.000000000 -0400 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi * Align the stack pointer according to the i386 ABI, * i.e. so that on function entry ((sp + 4) & 15) == 0. */ - sp = ((sp + 4) & -16ul) - 4; + sp = ((sp - 12) & -16ul) - 4; #else /* !CONFIG_X86_32 */ sp = round_down(sp, 16) - 8; #endif @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str * Return an always-bogus address instead so we will die with SIGSEGV. */ if (onsigstack && !likely(on_sig_stack(sp))) - return (void __user *)-1L; + return (__force void __user *)-1L; /* save i387 state */ if (used_math() && save_i387_xstate(*fpstate) < 0) - return (void __user *)-1L; + return (__force void __user *)-1L; return (void __user *)sp; } @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio } if (current->mm->context.vdso) - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); else - restorer = &frame->retcode; + restorer = (void __user *)&frame->retcode; if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio * reasons and because gdb uses it as a signature to notice * signal handler stack frames. */ - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode); + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode); if (err) return -EFAULT; @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); /* Set up to return from userspace. 
*/ - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); + if (current->mm->context.vdso) + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); + else + restorer = (void __user *)&frame->retcode; if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; put_user_ex(restorer, &frame->pretcode); @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str * reasons and because gdb uses it as a signature to notice * signal handler stack frames. */ - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode); } put_user_catch(err); if (err) @@ -773,6 +776,8 @@ static void do_signal(struct pt_regs *re int signr; sigset_t *oldset; + pax_track_stack(); + /* * We want the common case to go fast, which is why we may in certain * cases get here from kernel mode. Just return without doing anything @@ -780,7 +785,7 @@ static void do_signal(struct pt_regs *re * X86_32: vm86 regs switched out by assembly code before reaching * here, so testing against kernel CS suffices. */ - if (!user_mode(regs)) + if (!user_mode_novm(regs)) return; if (current_thread_info()->status & TS_RESTORE_SIGMASK) diff -urNp linux-2.6.39.1/arch/x86/kernel/smpboot.c linux-2.6.39.1/arch/x86/kernel/smpboot.c --- linux-2.6.39.1/arch/x86/kernel/smpboot.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/smpboot.c 2011-05-22 19:36:30.000000000 -0400 @@ -696,17 +696,20 @@ static int __cpuinit do_boot_cpu(int api set_idle_for_cpu(cpu, c_idle.idle); do_rest: per_cpu(current_task, cpu) = c_idle.idle; + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo; #ifdef CONFIG_X86_32 /* Stack for startup_32 can be just as for start_secondary onwards */ irq_ctx_init(cpu); #else clear_tsk_thread_flag(c_idle.idle, TIF_FORK); initial_gs = per_cpu_offset(cpu); - per_cpu(kernel_stack, cpu) = - (unsigned long)task_stack_page(c_idle.idle) - - KERNEL_STACK_OFFSET + THREAD_SIZE; + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE; #endif + + pax_open_kernel(); early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); + pax_close_kernel(); + initial_code = (unsigned long)start_secondary; stack_start = c_idle.idle->thread.sp; @@ -848,6 +851,12 @@ int __cpuinit native_cpu_up(unsigned int per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; +#ifdef CONFIG_PAX_PER_CPU_PGD + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); +#endif + err = do_boot_cpu(apicid, cpu); if (err) { pr_debug("do_boot_cpu failed %d\n", err); diff -urNp linux-2.6.39.1/arch/x86/kernel/step.c linux-2.6.39.1/arch/x86/kernel/step.c --- linux-2.6.39.1/arch/x86/kernel/step.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/step.c 2011-05-22 19:36:30.000000000 -0400 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc struct desc_struct *desc; unsigned long base; - seg &= ~7UL; + seg >>= 3; mutex_lock(&child->mm->context.lock); - if (unlikely((seg >> 3) >= child->mm->context.size)) + if (unlikely(seg >= child->mm->context.size)) addr = -1L; /* bogus selector, access would fault */ else { desc = child->mm->context.ldt + seg; @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc addr += base; } mutex_unlock(&child->mm->context.lock); - } + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS) + addr = ktla_ktva(addr); return addr; } @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t unsigned char 
opcode[15]; unsigned long addr = convert_ip_to_linear(child, regs); + if (addr == -EINVAL) + return 0; + copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); for (i = 0; i < copied; i++) { switch (opcode[i]) { @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t #ifdef CONFIG_X86_64 case 0x40 ... 0x4f: - if (regs->cs != __USER_CS) + if ((regs->cs & 0xffff) != __USER_CS) /* 32-bit mode: register increment */ return 0; /* 64-bit mode: REX prefix */ diff -urNp linux-2.6.39.1/arch/x86/kernel/syscall_table_32.S linux-2.6.39.1/arch/x86/kernel/syscall_table_32.S --- linux-2.6.39.1/arch/x86/kernel/syscall_table_32.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/syscall_table_32.S 2011-05-22 19:36:30.000000000 -0400 @@ -1,3 +1,4 @@ +.section .rodata,"a",@progbits ENTRY(sys_call_table) .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ .long sys_exit diff -urNp linux-2.6.39.1/arch/x86/kernel/sys_i386_32.c linux-2.6.39.1/arch/x86/kernel/sys_i386_32.c --- linux-2.6.39.1/arch/x86/kernel/sys_i386_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/sys_i386_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -24,17 +24,224 @@ #include -/* - * Do a system call from kernel instead of calling sys_execve so we - * end up with proper pt_regs. - */ -int kernel_execve(const char *filename, - const char *const argv[], - const char *const envp[]) +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) { - long __res; - asm volatile ("int $0x80" - : "=a" (__res) - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory"); - return __res; + unsigned long pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + if (len > pax_task_size || addr > pax_task_size - len) + return -EINVAL; + + return 0; +} + +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long start_addr, pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + pax_task_size -= PAGE_SIZE; + + if (len > pax_task_size) + return -ENOMEM; + + if (flags & MAP_FIXED) + return addr; + +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + + if (addr) { + addr = PAGE_ALIGN(addr); + if (pax_task_size - len >= addr) { + vma = find_vma(mm, addr); + if (check_heap_stack_gap(vma, addr, len)) + return addr; + } + } + if (len > mm->cached_hole_size) { + start_addr = addr = mm->free_area_cache; + } else { + start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + } + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) { + start_addr = 0x00110000UL; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + start_addr += mm->delta_mmap & 0x03FFF000UL; +#endif + + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base) + start_addr = addr = mm->mmap_base; + else + addr = start_addr; + } +#endif + +full_search: + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). */ + if (pax_task_size - len < addr) { + /* + * Start a new search - just in case we missed + * some holes. 
+ */ + if (start_addr != mm->mmap_base) { + start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } + if (check_heap_stack_gap(vma, addr, len)) + break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = vma->vm_end; + if (mm->start_brk <= addr && addr < mm->mmap_base) { + start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + } + + /* + * Remember the place where we stopped the search: + */ + mm->free_area_cache = addr + len; + return addr; +} + +unsigned long +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, const unsigned long pgoff, + const unsigned long flags) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + pax_task_size -= PAGE_SIZE; + + /* requested length too big for entire address space */ + if (len > pax_task_size) + return -ENOMEM; + + if (flags & MAP_FIXED) + return addr; + +#ifdef CONFIG_PAX_PAGEEXEC + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) + goto bottomup; +#endif + +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); + if (pax_task_size - len >= addr) { + vma = find_vma(mm, addr); + if (check_heap_stack_gap(vma, addr, len)) + return addr; + } + } + + /* check if free_area_cache is useful for us */ + if (len <= mm->cached_hole_size) { + mm->cached_hole_size = 0; + mm->free_area_cache = mm->mmap_base; + } + + /* either no address requested or can't fit in requested address hole */ + addr = mm->free_area_cache; + + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); + if (check_heap_stack_gap(vma, addr - len, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } + + if (mm->mmap_base < len) + goto bottomup; + + addr = mm->mmap_base-len; + + do { + /* + * Lookup failure means no vma is above this address, + * else if new region fits below vma->vm_start, + * return with success: + */ + vma = find_vma(mm, addr); + if (check_heap_stack_gap(vma, addr, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + + /* remember the largest hole we saw so far */ + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ + addr = skip_heap_stack_gap(vma, len); + } while (!IS_ERR_VALUE(addr)); + +bottomup: + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. 
+ */ + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; + else +#endif + + mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + + mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ + mm->mmap_base = base; + mm->free_area_cache = base; + mm->cached_hole_size = ~0UL; + + return addr; } diff -urNp linux-2.6.39.1/arch/x86/kernel/sys_x86_64.c linux-2.6.39.1/arch/x86/kernel/sys_x86_64.c --- linux-2.6.39.1/arch/x86/kernel/sys_x86_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/sys_x86_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -32,8 +32,8 @@ out: return error; } -static void find_start_end(unsigned long flags, unsigned long *begin, - unsigned long *end) +static void find_start_end(struct mm_struct *mm, unsigned long flags, + unsigned long *begin, unsigned long *end) { if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) { unsigned long new_begin; @@ -52,7 +52,7 @@ static void find_start_end(unsigned long *begin = new_begin; } } else { - *begin = TASK_UNMAPPED_BASE; + *begin = mm->mmap_base; *end = TASK_SIZE; } } @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp if (flags & MAP_FIXED) return addr; - find_start_end(flags, &begin, &end); + find_start_end(mm, flags, &begin, &end); if (len > end) return -ENOMEM; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); - if (end - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (end - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) @@ -106,7 +109,7 @@ full_search: } return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { + if (check_heap_stack_gap(vma, addr, len)) { /* * Remember the place where we stopped the search: */ @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; - unsigned long addr = addr0; + unsigned long base = mm->mmap_base, addr = addr0; /* requested length too big for entire address space */ if (len > TASK_SIZE) @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) goto bottomup; +#ifdef CONFIG_PAX_RANDMMAP + if (!(mm->pax_flags & MF_PAX_RANDMMAP)) +#endif + /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) - return addr; + if (TASK_SIZE - len >= addr) { + vma = find_vma(mm, addr); + if (check_heap_stack_gap(vma, addr, len)) + return addr; + } } /* check if free_area_cache is useful for us */ @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi /* make sure it can fit in the remaining address space */ if (addr > len) { vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) + if (check_heap_stack_gap(vma, addr - len, len)) /* remember the address as a hint for next time */ return mm->free_area_cache = addr-len; } @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi * return with success: */ vma = find_vma(mm, addr); - if (!vma || addr+len <= vma->vm_start) + if (check_heap_stack_gap(vma, addr, len)) /* remember the address as a hint for next time */ 
return mm->free_area_cache = addr; @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi mm->cached_hole_size = vma->vm_start - addr; /* try just below the current vma->vm_start */ - addr = vma->vm_start-len; - } while (len < vma->vm_start); + addr = skip_heap_stack_gap(vma, len); + } while (!IS_ERR_VALUE(addr)); bottomup: /* @@ -198,13 +206,21 @@ bottomup: * can happen with large stack limits and large mmap() * allocations. */ + mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + + mm->free_area_cache = mm->mmap_base; mm->cached_hole_size = ~0UL; - mm->free_area_cache = TASK_UNMAPPED_BASE; addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); /* * Restore the topdown base: */ - mm->free_area_cache = mm->mmap_base; + mm->mmap_base = base; + mm->free_area_cache = base; mm->cached_hole_size = ~0UL; return addr; diff -urNp linux-2.6.39.1/arch/x86/kernel/tboot.c linux-2.6.39.1/arch/x86/kernel/tboot.c --- linux-2.6.39.1/arch/x86/kernel/tboot.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/tboot.c 2011-05-22 19:36:30.000000000 -0400 @@ -218,7 +218,7 @@ static int tboot_setup_sleep(void) void tboot_shutdown(u32 shutdown_type) { - void (*shutdown)(void); + void (* __noreturn shutdown)(void); if (!tboot_enabled()) return; @@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type) switch_to_tboot_pt(); - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry; + shutdown = (void *)tboot->shutdown_entry; shutdown(); /* should not reach here */ @@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1 tboot_shutdown(acpi_shutdown_map[sleep_state]); } -static atomic_t ap_wfs_count; +static atomic_unchecked_t ap_wfs_count; static int tboot_wait_for_aps(int num_aps) { @@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback( { switch (action) { case CPU_DYING: - atomic_inc(&ap_wfs_count); + atomic_inc_unchecked(&ap_wfs_count); if (num_online_cpus() == 1) - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count))) + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count))) return NOTIFY_BAD; break; } @@ -342,7 +342,7 @@ static __init int tboot_late_init(void) tboot_create_trampoline(); - atomic_set(&ap_wfs_count, 0); + atomic_set_unchecked(&ap_wfs_count, 0); register_hotcpu_notifier(&tboot_cpu_notifier); return 0; } diff -urNp linux-2.6.39.1/arch/x86/kernel/time.c linux-2.6.39.1/arch/x86/kernel/time.c --- linux-2.6.39.1/arch/x86/kernel/time.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/time.c 2011-05-22 19:36:30.000000000 -0400 @@ -22,17 +22,13 @@ #include #include -#ifdef CONFIG_X86_64 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; -#endif - unsigned long profile_pc(struct pt_regs *regs) { unsigned long pc = instruction_pointer(regs); - if (!user_mode_vm(regs) && in_lock_functions(pc)) { + if (!user_mode(regs) && in_lock_functions(pc)) { #ifdef CONFIG_FRAME_POINTER - return *(unsigned long *)(regs->bp + sizeof(long)); + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long))); #else unsigned long *sp = (unsigned long *)kernel_stack_pointer(regs); @@ -41,11 +37,17 @@ unsigned long profile_pc(struct pt_regs * or above a saved flags. Eflags has bits 22-31 zero, * kernel addresses don't. 
*/ + +#ifdef CONFIG_PAX_KERNEXEC + return ktla_ktva(sp[0]); +#else if (sp[0] >> 22) return sp[0]; if (sp[1] >> 22) return sp[1]; #endif + +#endif } return pc; } diff -urNp linux-2.6.39.1/arch/x86/kernel/tls.c linux-2.6.39.1/arch/x86/kernel/tls.c --- linux-2.6.39.1/arch/x86/kernel/tls.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/tls.c 2011-05-22 19:36:30.000000000 -0400 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return -EINVAL; +#ifdef CONFIG_PAX_SEGMEXEC + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE)) + return -EINVAL; +#endif + set_tls_desc(p, idx, &info, 1); return 0; diff -urNp linux-2.6.39.1/arch/x86/kernel/trampoline_32.S linux-2.6.39.1/arch/x86/kernel/trampoline_32.S --- linux-2.6.39.1/arch/x86/kernel/trampoline_32.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/trampoline_32.S 2011-05-22 19:36:30.000000000 -0400 @@ -32,6 +32,12 @@ #include #include +#ifdef CONFIG_PAX_KERNEXEC +#define ta(X) (X) +#else +#define ta(X) ((X) - __PAGE_OFFSET) +#endif + #ifdef CONFIG_SMP .section ".x86_trampoline","a" @@ -62,7 +68,7 @@ r_base = . inc %ax # protected mode (PE) bit lmsw %ax # into protected mode # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET) + ljmpl $__BOOT_CS, $ta(startup_32_smp) # These need to be in the same 64K segment as the above; # hence we don't use the boot_gdt_descr defined in head.S diff -urNp linux-2.6.39.1/arch/x86/kernel/trampoline_64.S linux-2.6.39.1/arch/x86/kernel/trampoline_64.S --- linux-2.6.39.1/arch/x86/kernel/trampoline_64.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/trampoline_64.S 2011-05-22 19:36:30.000000000 -0400 @@ -90,7 +90,7 @@ startup_32: movl $__KERNEL_DS, %eax # Initialize the %ds segment register movl %eax, %ds - movl $X86_CR4_PAE, %eax + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax movl %eax, %cr4 # Enable PAE mode # Setup trampoline 4 level pagetables @@ -138,7 +138,7 @@ tidt: # so the kernel can live anywhere .balign 4 tgdt: - .short tgdt_end - tgdt # gdt limit + .short tgdt_end - tgdt - 1 # gdt limit .long tgdt - r_base .short 0 .quad 0x00cf9b000000ffff # __KERNEL32_CS diff -urNp linux-2.6.39.1/arch/x86/kernel/traps.c linux-2.6.39.1/arch/x86/kernel/traps.c --- linux-2.6.39.1/arch/x86/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/traps.c 2011-05-22 19:36:30.000000000 -0400 @@ -70,12 +70,6 @@ asmlinkage int system_call(void); /* Do we ignore FPU interrupts ? */ char ignore_fpu_irq; - -/* - * The IDT has to be page-aligned to simplify the Pentium - * F0 0F bug workaround. - */ -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; #endif DECLARE_BITMAP(used_vectors, NR_VECTORS); @@ -117,13 +111,13 @@ static inline void preempt_conditional_c } static void __kprobes -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs, long error_code, siginfo_t *info) { struct task_struct *tsk = current; #ifdef CONFIG_X86_32 - if (regs->flags & X86_VM_MASK) { + if (v8086_mode(regs)) { /* * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. * On nmi (interrupt 2), do_trap should not be called. 
@@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str } #endif - if (!user_mode(regs)) + if (!user_mode_novm(regs)) goto kernel_trap; #ifdef CONFIG_X86_32 @@ -157,7 +151,7 @@ trap_signal: printk_ratelimit()) { printk(KERN_INFO "%s[%d] trap %s ip:%lx sp:%lx error:%lx", - tsk->comm, tsk->pid, str, + tsk->comm, task_pid_nr(tsk), str, regs->ip, regs->sp, error_code); print_vma_addr(" in ", regs->ip); printk("\n"); @@ -174,8 +168,20 @@ kernel_trap: if (!fixup_exception(regs)) { tsk->thread.error_code = error_code; tsk->thread.trap_no = trapnr; + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)) + str = "PAX: suspicious stack segment fault"; +#endif + die(str, regs, error_code); } + +#ifdef CONFIG_PAX_REFCOUNT + if (trapnr == 4) + pax_report_refcount_overflow(regs); +#endif + return; #ifdef CONFIG_X86_32 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re conditional_sti(regs); #ifdef CONFIG_X86_32 - if (regs->flags & X86_VM_MASK) + if (v8086_mode(regs)) goto gp_in_vm86; #endif tsk = current; - if (!user_mode(regs)) + if (!user_mode_novm(regs)) goto gp_in_kernel; +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) { + struct mm_struct *mm = tsk->mm; + unsigned long limit; + + down_write(&mm->mmap_sem); + limit = mm->context.user_cs_limit; + if (limit < TASK_SIZE) { + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC); + up_write(&mm->mmap_sem); + return; + } + up_write(&mm->mmap_sem); + } +#endif + tsk->thread.error_code = error_code; tsk->thread.trap_no = 13; @@ -304,6 +326,13 @@ gp_in_kernel: if (notify_die(DIE_GPF, "general protection fault", regs, error_code, 13, SIGSEGV) == NOTIFY_STOP) return; + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS) + die("PAX: suspicious general protection fault", regs, error_code); + else +#endif + die("general protection fault", regs, error_code); } @@ -569,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st /* It's safe to allow irq's after DR6 has been saved */ preempt_conditional_sti(regs); - if (regs->flags & X86_VM_MASK) { + if (v8086_mode(regs)) { handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); preempt_conditional_cli(regs); @@ -583,7 +612,7 @@ dotraplinkage void __kprobes do_debug(st * We already checked v86 mode above, so we can check for kernel mode * by just checking the CPL of CS. */ - if ((dr6 & DR_STEP) && !user_mode(regs)) { + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) { tsk->thread.debugreg6 &= ~DR_STEP; set_tsk_thread_flag(tsk, TIF_SINGLESTEP); regs->flags &= ~X86_EFLAGS_TF; @@ -612,7 +641,7 @@ void math_error(struct pt_regs *regs, in return; conditional_sti(regs); - if (!user_mode_vm(regs)) + if (!user_mode(regs)) { if (!fixup_exception(regs)) { task->thread.error_code = error_code; @@ -723,7 +752,7 @@ asmlinkage void __attribute__((weak)) sm void __math_state_restore(void) { struct thread_info *thread = current_thread_info(); - struct task_struct *tsk = thread->task; + struct task_struct *tsk = current; /* * Paranoid restore. send a SIGSEGV if we fail to restore the state. 
@@ -750,8 +779,7 @@ void __math_state_restore(void) */ asmlinkage void math_state_restore(void) { - struct thread_info *thread = current_thread_info(); - struct task_struct *tsk = thread->task; + struct task_struct *tsk = current; if (!tsk_used_math(tsk)) { local_irq_enable(); diff -urNp linux-2.6.39.1/arch/x86/kernel/vm86_32.c linux-2.6.39.1/arch/x86/kernel/vm86_32.c --- linux-2.6.39.1/arch/x86/kernel/vm86_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/vm86_32.c 2011-05-22 19:41:32.000000000 -0400 @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke do_exit(SIGSEGV); } - tss = &per_cpu(init_tss, get_cpu()); + tss = init_tss + get_cpu(); current->thread.sp0 = current->thread.saved_sp0; current->thread.sysenter_cs = __KERNEL_CS; load_sp0(tss, &current->thread); @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use struct task_struct *tsk; int tmp, ret = -EPERM; +#ifdef CONFIG_GRKERNSEC_VM86 + if (!capable(CAP_SYS_RAWIO)) { + gr_handle_vm86(); + goto out; + } +#endif + tsk = current; if (tsk->thread.saved_sp0) goto out; @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned int tmp, ret; struct vm86plus_struct __user *v86; +#ifdef CONFIG_GRKERNSEC_VM86 + if (!capable(CAP_SYS_RAWIO)) { + gr_handle_vm86(); + ret = -EPERM; + goto out; + } +#endif + tsk = current; switch (cmd) { case VM86_REQUEST_IRQ: @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm tsk->thread.saved_fs = info->regs32->fs; tsk->thread.saved_gs = get_user_gs(info->regs32); - tss = &per_cpu(init_tss, get_cpu()); + tss = init_tss + get_cpu(); tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; if (cpu_has_sep) tsk->thread.sysenter_cs = 0; @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re goto cannot_handle; if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) goto cannot_handle; - intr_ptr = (unsigned long __user *) (i << 2); + intr_ptr = (__force unsigned long __user *) (i << 2); if (get_user(segoffs, intr_ptr)) goto cannot_handle; if ((segoffs >> 16) == BIOSSEG) diff -urNp linux-2.6.39.1/arch/x86/kernel/vmlinux.lds.S linux-2.6.39.1/arch/x86/kernel/vmlinux.lds.S --- linux-2.6.39.1/arch/x86/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/vmlinux.lds.S 2011-05-22 19:36:30.000000000 -0400 @@ -26,6 +26,13 @@ #include #include #include +#include + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR) +#else +#define __KERNEL_TEXT_OFFSET 0 +#endif #undef i386 /* in case the preprocessor is a 32bit one */ @@ -34,11 +41,9 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF #ifdef CONFIG_X86_32 OUTPUT_ARCH(i386) ENTRY(phys_startup_32) -jiffies = jiffies_64; #else OUTPUT_ARCH(i386:x86-64) ENTRY(phys_startup_64) -jiffies_64 = jiffies; #endif #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) @@ -69,31 +74,46 @@ jiffies_64 = jiffies; PHDRS { text PT_LOAD FLAGS(5); /* R_E */ +#ifdef CONFIG_X86_32 + module PT_LOAD FLAGS(5); /* R_E */ +#endif +#ifdef CONFIG_XEN + rodata PT_LOAD FLAGS(5); /* R_E */ +#else + rodata PT_LOAD FLAGS(4); /* R__ */ +#endif data PT_LOAD FLAGS(6); /* RW_ */ #ifdef CONFIG_X86_64 user PT_LOAD FLAGS(5); /* R_E */ +#endif + init.begin PT_LOAD FLAGS(6); /* RW_ */ #ifdef CONFIG_SMP percpu PT_LOAD FLAGS(6); /* RW_ */ #endif + text.init PT_LOAD FLAGS(5); /* R_E */ + text.exit PT_LOAD FLAGS(5); /* R_E */ init PT_LOAD FLAGS(7); /* RWE */ -#endif note
PT_NOTE FLAGS(0); /* ___ */ } SECTIONS { #ifdef CONFIG_X86_32 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; - phys_startup_32 = startup_32 - LOAD_OFFSET; + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR; #else - . = __START_KERNEL; - phys_startup_64 = startup_64 - LOAD_OFFSET; + . = __START_KERNEL; #endif /* Text and read-only data */ - .text : AT(ADDR(.text) - LOAD_OFFSET) { - _text = .; + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { /* bootstrapping code */ +#ifdef CONFIG_X86_32 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; +#else + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; +#endif + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; + _text = .; HEAD_TEXT #ifdef CONFIG_X86_32 . = ALIGN(PAGE_SIZE); @@ -109,13 +129,47 @@ SECTIONS IRQENTRY_TEXT *(.fixup) *(.gnu.warning) - /* End of text section */ - _etext = .; } :text = 0x9090 - NOTES :text :note + . += __KERNEL_TEXT_OFFSET; + +#ifdef CONFIG_X86_32 + . = ALIGN(PAGE_SIZE); + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) { + +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES) + MODULES_EXEC_VADDR = .; + BYTE(0) + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024); + . = ALIGN(HPAGE_SIZE); + MODULES_EXEC_END = . - 1; +#endif + + } :module +#endif + + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) { + /* End of text section */ + _etext = . - __KERNEL_TEXT_OFFSET; + } - EXCEPTION_TABLE(16) :text = 0x9090 +#ifdef CONFIG_X86_32 + . = ALIGN(PAGE_SIZE); + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) { + *(.idt) + . = ALIGN(PAGE_SIZE); + *(.empty_zero_page) + *(.initial_pg_fixmap) + *(.initial_pg_pmd) + *(.initial_page_table) + *(.swapper_pg_dir) + } :rodata +#endif + + . = ALIGN(PAGE_SIZE); + NOTES :rodata :note + + EXCEPTION_TABLE(16) :rodata #if defined(CONFIG_DEBUG_RODATA) /* .text should occupy whole number of pages */ @@ -127,16 +181,20 @@ SECTIONS /* Data */ .data : AT(ADDR(.data) - LOAD_OFFSET) { + +#ifdef CONFIG_PAX_KERNEXEC + . = ALIGN(HPAGE_SIZE); +#else + . = ALIGN(PAGE_SIZE); +#endif + /* Start of data section */ _sdata = .; /* init_task */ INIT_TASK_DATA(THREAD_SIZE) -#ifdef CONFIG_X86_32 - /* 32 bit has nosave before _edata */ NOSAVE_DATA -#endif PAGE_ALIGNED_DATA(PAGE_SIZE) @@ -145,6 +203,8 @@ SECTIONS DATA_DATA CONSTRUCTORS + jiffies = jiffies_64; + /* rarely changed data like cpu maps */ READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES) @@ -199,12 +259,6 @@ SECTIONS } vgetcpu_mode = VVIRT(.vgetcpu_mode); - . = ALIGN(L1_CACHE_BYTES); - .jiffies : AT(VLOAD(.jiffies)) { - *(.jiffies) - } - jiffies = VVIRT(.jiffies); - .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { *(.vsyscall_3) } @@ -220,12 +274,19 @@ SECTIONS #endif /* CONFIG_X86_64 */ /* Init code and data - will be freed after init */ - . = ALIGN(PAGE_SIZE); .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) { + BYTE(0) + +#ifdef CONFIG_PAX_KERNEXEC + . = ALIGN(HPAGE_SIZE); +#else + . = ALIGN(PAGE_SIZE); +#endif + __init_begin = .; /* paired with __init_end */ - } + } :init.begin -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP) +#ifdef CONFIG_SMP /* * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the * output PHDR, so the next output section - .init.text - should @@ -234,12 +295,27 @@ SECTIONS PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu) #endif - INIT_TEXT_SECTION(PAGE_SIZE) -#ifdef CONFIG_X86_64 - :init -#endif + . = ALIGN(PAGE_SIZE); + init_begin = .; + .init.text (. 
- __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) { + VMLINUX_SYMBOL(_sinittext) = .; + INIT_TEXT + VMLINUX_SYMBOL(_einittext) = .; + . = ALIGN(PAGE_SIZE); + } :text.init - INIT_DATA_SECTION(16) + /* + * .exit.text is discard at runtime, not link time, to deal with + * references from .altinstructions and .eh_frame + */ + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { + EXIT_TEXT + . = ALIGN(16); + } :text.exit + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text); + + . = ALIGN(PAGE_SIZE); + INIT_DATA_SECTION(16) :init /* * Code and data for a variety of lowlevel trampolines, to be @@ -306,19 +382,12 @@ SECTIONS } . = ALIGN(8); - /* - * .exit.text is discard at runtime, not link time, to deal with - * references from .altinstructions and .eh_frame - */ - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { - EXIT_TEXT - } .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA } -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) +#ifndef CONFIG_SMP PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE) #endif @@ -337,16 +406,10 @@ SECTIONS .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { __smp_locks = .; *(.smp_locks) - . = ALIGN(PAGE_SIZE); __smp_locks_end = .; + . = ALIGN(PAGE_SIZE); } -#ifdef CONFIG_X86_64 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { - NOSAVE_DATA - } -#endif - /* BSS */ . = ALIGN(PAGE_SIZE); .bss : AT(ADDR(.bss) - LOAD_OFFSET) { @@ -362,6 +425,7 @@ SECTIONS __brk_base = .; . += 64 * 1024; /* 64k alignment slop space */ *(.brk_reservation) /* areas brk users have reserved */ + . = ALIGN(HPAGE_SIZE); __brk_limit = .; } @@ -388,13 +452,12 @@ SECTIONS * for the boot processor. */ #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load -INIT_PER_CPU(gdt_page); INIT_PER_CPU(irq_stack_union); /* * Build-time check on the image size: */ -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE), "kernel image bigger than KERNEL_IMAGE_SIZE"); #ifdef CONFIG_SMP diff -urNp linux-2.6.39.1/arch/x86/kernel/vsyscall_64.c linux-2.6.39.1/arch/x86/kernel/vsyscall_64.c --- linux-2.6.39.1/arch/x86/kernel/vsyscall_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/vsyscall_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); /* copy vsyscall data */ + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name); vsyscall_gtod_data.clock.vread = clock->vread; vsyscall_gtod_data.clock.cycle_last = clock->cycle_last; vsyscall_gtod_data.clock.mask = clock->mask; @@ -208,7 +209,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s We do this here because otherwise user space would do it on its own in a likely inferior way (no access to jiffies). If you don't like it pass NULL. 
*/ - if (tcache && tcache->blob[0] == (j = __jiffies)) { + if (tcache && tcache->blob[0] == (j = jiffies)) { p = tcache->blob[1]; } else if (__vgetcpu_mode == VGETCPU_RDTSCP) { /* Load per CPU data from RDTSCP */ diff -urNp linux-2.6.39.1/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.39.1/arch/x86/kernel/x8664_ksyms_64.c --- linux-2.6.39.1/arch/x86/kernel/x8664_ksyms_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/x8664_ksyms_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8); EXPORT_SYMBOL(copy_user_generic_string); EXPORT_SYMBOL(copy_user_generic_unrolled); EXPORT_SYMBOL(__copy_user_nocache); -EXPORT_SYMBOL(_copy_from_user); -EXPORT_SYMBOL(_copy_to_user); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(clear_page); diff -urNp linux-2.6.39.1/arch/x86/kernel/xsave.c linux-2.6.39.1/arch/x86/kernel/xsave.c --- linux-2.6.39.1/arch/x86/kernel/xsave.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kernel/xsave.c 2011-05-22 19:36:30.000000000 -0400 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_ fx_sw_user->xstate_size > fx_sw_user->extended_size) return -EINVAL; - err = __get_user(magic2, (__u32 *) (((void *)fpstate) + + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) + fx_sw_user->extended_size - FP_XSTATE_MAGIC2_SIZE)); if (err) @@ -267,7 +267,7 @@ fx_only: * the other extended state. */ xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE); - return fxrstor_checking((__force struct i387_fxsave_struct *)buf); + return fxrstor_checking((struct i387_fxsave_struct __user *)buf); } /* @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf if (use_xsave()) err = restore_user_xstate(buf); else - err = fxrstor_checking((__force struct i387_fxsave_struct *) + err = fxrstor_checking((struct i387_fxsave_struct __user *) buf); if (unlikely(err)) { /* diff -urNp linux-2.6.39.1/arch/x86/kvm/emulate.c linux-2.6.39.1/arch/x86/kvm/emulate.c --- linux-2.6.39.1/arch/x86/kvm/emulate.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kvm/emulate.c 2011-05-22 19:36:30.000000000 -0400 @@ -89,7 +89,7 @@ #define Src2ImmByte (2<<29) #define Src2One (3<<29) #define Src2Imm (4<<29) -#define Src2Mask (7<<29) +#define Src2Mask (7U<<29) #define X2(x...) x, x #define X3(x...) X2(x), x @@ -190,6 +190,7 @@ struct group_dual { #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \ do { \ + unsigned long _tmp; \ __asm__ __volatile__ ( \ _PRE_EFLAGS("0", "4", "2") \ _op _suffix " %"_x"3,%1; " \ @@ -203,8 +204,6 @@ struct group_dual { /* Raw emulation: instruction has two explicit operands. */ #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \ do { \ - unsigned long _tmp; \ - \ switch ((_dst).bytes) { \ case 2: \ ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\ @@ -220,7 +219,6 @@ struct group_dual { #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \ do { \ - unsigned long _tmp; \ switch ((_dst).bytes) { \ case 1: \ ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \ diff -urNp linux-2.6.39.1/arch/x86/kvm/lapic.c linux-2.6.39.1/arch/x86/kvm/lapic.c --- linux-2.6.39.1/arch/x86/kvm/lapic.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kvm/lapic.c 2011-05-22 19:36:30.000000000 -0400 @@ -53,7 +53,7 @@ #define APIC_BUS_CYCLE_NS 1 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ -#define apic_debug(fmt, arg...) +#define apic_debug(fmt, arg...) 
do {} while (0) #define APIC_LVT_NUM 6 /* 14 is the version for Xeon and Pentium 8.4.8*/ diff -urNp linux-2.6.39.1/arch/x86/kvm/mmu.c linux-2.6.39.1/arch/x86/kvm/mmu.c --- linux-2.6.39.1/arch/x86/kvm/mmu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kvm/mmu.c 2011-05-22 19:36:30.000000000 -0400 @@ -3240,7 +3240,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu * pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter); + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter); /* * Assume that the pte write on a page table of the same type @@ -3275,7 +3275,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu * smp_rmb(); spin_lock(&vcpu->kvm->mmu_lock); - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter) + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter) gentry = 0; kvm_mmu_free_some_pages(vcpu); ++vcpu->kvm->stat.mmu_pte_write; diff -urNp linux-2.6.39.1/arch/x86/kvm/paging_tmpl.h linux-2.6.39.1/arch/x86/kvm/paging_tmpl.h --- linux-2.6.39.1/arch/x86/kvm/paging_tmpl.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kvm/paging_tmpl.h 2011-05-22 19:36:30.000000000 -0400 @@ -552,6 +552,8 @@ static int FNAME(page_fault)(struct kvm_ unsigned long mmu_seq; bool map_writable; + pax_track_stack(); + pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); r = mmu_topup_memory_caches(vcpu); @@ -672,7 +674,7 @@ static void FNAME(invlpg)(struct kvm_vcp if (need_flush) kvm_flush_remote_tlbs(vcpu->kvm); - atomic_inc(&vcpu->kvm->arch.invlpg_counter); + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter); spin_unlock(&vcpu->kvm->mmu_lock); diff -urNp linux-2.6.39.1/arch/x86/kvm/svm.c linux-2.6.39.1/arch/x86/kvm/svm.c --- linux-2.6.39.1/arch/x86/kvm/svm.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kvm/svm.c 2011-05-22 19:36:30.000000000 -0400 @@ -3278,7 +3278,11 @@ static void reload_tss(struct kvm_vcpu * int cpu = raw_smp_processor_id(); struct svm_cpu_data *sd = per_cpu(svm_data, cpu); + + pax_open_kernel(); sd->tss_desc->type = 9; /* available 32/64-bit TSS */ + pax_close_kernel(); + load_TR_desc(); } @@ -3656,6 +3660,10 @@ static void svm_vcpu_run(struct kvm_vcpu #endif #endif +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + __set_fs(current_thread_info()->addr_limit); +#endif + reload_tss(vcpu); local_irq_disable(); @@ -3871,7 +3879,7 @@ static void svm_fpu_deactivate(struct kv update_cr0_intercept(svm); } -static struct kvm_x86_ops svm_x86_ops = { +static const struct kvm_x86_ops svm_x86_ops = { .cpu_has_kvm_support = has_svm, .disabled_by_bios = is_disabled, .hardware_setup = svm_hardware_setup, diff -urNp linux-2.6.39.1/arch/x86/kvm/vmx.c linux-2.6.39.1/arch/x86/kvm/vmx.c --- linux-2.6.39.1/arch/x86/kvm/vmx.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kvm/vmx.c 2011-05-22 19:36:30.000000000 -0400 @@ -725,7 +725,11 @@ static void reload_tss(void) struct desc_struct *descs; descs = (void *)gdt->address; + + pax_open_kernel(); descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ + pax_close_kernel(); + load_TR_desc(); } @@ -1648,8 +1652,11 @@ static __init int hardware_setup(void) if (!cpu_has_vmx_flexpriority()) flexpriority_enabled = 0; - if (!cpu_has_vmx_tpr_shadow()) - kvm_x86_ops->update_cr8_intercept = NULL; + if (!cpu_has_vmx_tpr_shadow()) { + pax_open_kernel(); + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL; + pax_close_kernel(); + } if (enable_ept && 
!cpu_has_vmx_ept_2m_page()) kvm_disable_largepages(); @@ -2693,7 +2700,7 @@ static int vmx_vcpu_setup(struct vcpu_vm vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */ + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); @@ -4068,6 +4075,12 @@ static void __noclone vmx_vcpu_run(struc "jmp .Lkvm_vmx_return \n\t" ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" ".Lkvm_vmx_return: " + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + "ljmp %[cs],$.Lkvm_vmx_return2\n\t" + ".Lkvm_vmx_return2: " +#endif + /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%"R"sp) \n\t" "pop %0 \n\t" @@ -4116,6 +4129,11 @@ static void __noclone vmx_vcpu_run(struc #endif [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)), [wordsize]"i"(sizeof(ulong)) + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + ,[cs]"i"(__KERNEL_CS) +#endif + : "cc", "memory" , R"ax", R"bx", R"di", R"si" #ifdef CONFIG_X86_64 @@ -4130,7 +4148,16 @@ static void __noclone vmx_vcpu_run(struc vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS)); + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) + loadsegment(fs, __KERNEL_PERCPU); +#endif + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) + __set_fs(current_thread_info()->addr_limit); +#endif + vmx->launched = 1; vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); @@ -4368,7 +4395,7 @@ static void vmx_set_supported_cpuid(u32 { } -static struct kvm_x86_ops vmx_x86_ops = { +static const struct kvm_x86_ops vmx_x86_ops = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = vmx_disabled_by_bios, .hardware_setup = hardware_setup, diff -urNp linux-2.6.39.1/arch/x86/kvm/x86.c linux-2.6.39.1/arch/x86/kvm/x86.c --- linux-2.6.39.1/arch/x86/kvm/x86.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/kvm/x86.c 2011-05-22 19:36:30.000000000 -0400 @@ -94,7 +94,7 @@ static void update_cr8_intercept(struct static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries); -struct kvm_x86_ops *kvm_x86_ops; +const struct kvm_x86_ops *kvm_x86_ops; EXPORT_SYMBOL_GPL(kvm_x86_ops); int ignore_msrs = 0; @@ -2050,6 +2050,8 @@ long kvm_arch_dev_ioctl(struct file *fil if (n < msr_list.nmsrs) goto out; r = -EFAULT; + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save)) + goto out; if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; @@ -2217,15 +2219,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { - int r; + int r, i; r = -E2BIG; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) goto out; r = -EFAULT; - if (copy_from_user(&vcpu->arch.cpuid_entries, entries, - cpuid->nent * sizeof(struct kvm_cpuid_entry2))) + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2))) goto out; + for (i = 0; i < cpuid->nent; ++i) { + struct kvm_cpuid_entry2 cpuid_entry; + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry))) + goto out; + vcpu->arch.cpuid_entries[i] = cpuid_entry; + } vcpu->arch.cpuid_nent = cpuid->nent; kvm_apic_set_version(vcpu); 
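The kvm_vcpu_ioctl_set_cpuid2() change above replaces a single bulk copy_from_user() of the whole entry array with an access_ok() check followed by a per-entry __copy_from_user() loop (the get_cpuid2 counterpart below does the same in the copy-out direction). The general pattern, validate the count and then copy element by element through a local temporary, is sketched here in plain C; entry_t, MAX_ENTRIES, and fetch_from_user() are placeholders rather than KVM's real names.

#include <string.h>

#define MAX_ENTRIES 80              /* placeholder for KVM_MAX_CPUID_ENTRIES */

typedef struct {
    unsigned int function;
    unsigned int index;
} entry_t;                          /* placeholder for struct kvm_cpuid_entry2 */

/* Stand-in for __copy_from_user(); returns 0 on success. */
static int fetch_from_user(entry_t *dst, const entry_t *usrc)
{
    memcpy(dst, usrc, sizeof(*dst));
    return 0;
}

/*
 * Copy nent entries one at a time after validating the count, so a
 * corrupt or attacker-controlled count can never size one oversized
 * bulk copy.
 */
static int set_entries(entry_t *table, const entry_t *user_entries, unsigned int nent)
{
    unsigned int i;

    if (nent > MAX_ENTRIES)
        return -1;                  /* -E2BIG in the kernel code */

    for (i = 0; i < nent; i++) {
        entry_t tmp;

        if (fetch_from_user(&tmp, user_entries + i))
            return -2;              /* -EFAULT in the kernel code */
        table[i] = tmp;
    }
    return 0;
}

int main(void)
{
    entry_t user_buf[2] = { { 0, 0 }, { 1, 0 } };
    entry_t table[MAX_ENTRIES];

    return set_entries(table, user_buf, 2);
}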
kvm_x86_ops->cpuid_update(vcpu); @@ -2240,15 +2247,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { - int r; + int r, i; r = -E2BIG; if (cpuid->nent < vcpu->arch.cpuid_nent) goto out; r = -EFAULT; - if (copy_to_user(entries, &vcpu->arch.cpuid_entries, - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) goto out; + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) { + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i]; + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry))) + goto out; + } return 0; out: @@ -2526,7 +2537,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { - if (irq->irq < 0 || irq->irq >= 256) + if (irq->irq >= 256) return -EINVAL; if (irqchip_in_kernel(vcpu->kvm)) return -ENXIO; @@ -4672,7 +4683,7 @@ static unsigned long kvm_get_guest_ip(vo return ip; } -static struct perf_guest_info_callbacks kvm_guest_cbs = { +static const struct perf_guest_info_callbacks kvm_guest_cbs = { .is_in_guest = kvm_is_in_guest, .is_user_mode = kvm_is_user_mode, .get_guest_ip = kvm_get_guest_ip, @@ -4690,10 +4701,10 @@ void kvm_after_handle_nmi(struct kvm_vcp } EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); -int kvm_arch_init(void *opaque) +int kvm_arch_init(const void *opaque) { int r; - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque; if (kvm_x86_ops) { printk(KERN_ERR "kvm: already loaded the other module\n"); diff -urNp linux-2.6.39.1/arch/x86/lib/atomic64_32.c linux-2.6.39.1/arch/x86/lib/atomic64_32.c --- linux-2.6.39.1/arch/x86/lib/atomic64_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/atomic64_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -8,18 +8,30 @@ long long atomic64_read_cx8(long long, const atomic64_t *v); EXPORT_SYMBOL(atomic64_read_cx8); +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_read_unchecked_cx8); long long atomic64_set_cx8(long long, const atomic64_t *v); EXPORT_SYMBOL(atomic64_set_cx8); +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_set_unchecked_cx8); long long atomic64_xchg_cx8(long long, unsigned high); EXPORT_SYMBOL(atomic64_xchg_cx8); long long atomic64_add_return_cx8(long long a, atomic64_t *v); EXPORT_SYMBOL(atomic64_add_return_cx8); +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8); long long atomic64_sub_return_cx8(long long a, atomic64_t *v); EXPORT_SYMBOL(atomic64_sub_return_cx8); +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8); long long atomic64_inc_return_cx8(long long a, atomic64_t *v); EXPORT_SYMBOL(atomic64_inc_return_cx8); +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8); long long atomic64_dec_return_cx8(long long a, atomic64_t *v); EXPORT_SYMBOL(atomic64_dec_return_cx8); +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8); long long atomic64_dec_if_positive_cx8(atomic64_t *v); EXPORT_SYMBOL(atomic64_dec_if_positive_cx8); int 
atomic64_inc_not_zero_cx8(atomic64_t *v); @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8); #ifndef CONFIG_X86_CMPXCHG64 long long atomic64_read_386(long long, const atomic64_t *v); EXPORT_SYMBOL(atomic64_read_386); +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_read_unchecked_386); long long atomic64_set_386(long long, const atomic64_t *v); EXPORT_SYMBOL(atomic64_set_386); +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_set_unchecked_386); long long atomic64_xchg_386(long long, unsigned high); EXPORT_SYMBOL(atomic64_xchg_386); long long atomic64_add_return_386(long long a, atomic64_t *v); EXPORT_SYMBOL(atomic64_add_return_386); +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_add_return_unchecked_386); long long atomic64_sub_return_386(long long a, atomic64_t *v); EXPORT_SYMBOL(atomic64_sub_return_386); +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386); long long atomic64_inc_return_386(long long a, atomic64_t *v); EXPORT_SYMBOL(atomic64_inc_return_386); +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386); long long atomic64_dec_return_386(long long a, atomic64_t *v); EXPORT_SYMBOL(atomic64_dec_return_386); +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386); long long atomic64_add_386(long long a, atomic64_t *v); EXPORT_SYMBOL(atomic64_add_386); +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_add_unchecked_386); long long atomic64_sub_386(long long a, atomic64_t *v); EXPORT_SYMBOL(atomic64_sub_386); +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_sub_unchecked_386); long long atomic64_inc_386(long long a, atomic64_t *v); EXPORT_SYMBOL(atomic64_inc_386); +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_inc_unchecked_386); long long atomic64_dec_386(long long a, atomic64_t *v); EXPORT_SYMBOL(atomic64_dec_386); +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v); +EXPORT_SYMBOL(atomic64_dec_unchecked_386); long long atomic64_dec_if_positive_386(atomic64_t *v); EXPORT_SYMBOL(atomic64_dec_if_positive_386); int atomic64_inc_not_zero_386(atomic64_t *v); diff -urNp linux-2.6.39.1/arch/x86/lib/atomic64_386_32.S linux-2.6.39.1/arch/x86/lib/atomic64_386_32.S --- linux-2.6.39.1/arch/x86/lib/atomic64_386_32.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/atomic64_386_32.S 2011-05-22 19:36:30.000000000 -0400 @@ -48,6 +48,10 @@ BEGIN(read) movl (v), %eax movl 4(v), %edx RET_ENDP +BEGIN(read_unchecked) + movl (v), %eax + movl 4(v), %edx +RET_ENDP #undef v #define v %esi @@ -55,6 +59,10 @@ BEGIN(set) movl %ebx, (v) movl %ecx, 4(v) RET_ENDP +BEGIN(set_unchecked) + movl %ebx, (v) + movl %ecx, 4(v) +RET_ENDP #undef v #define v %esi @@ -70,6 +78,20 @@ RET_ENDP BEGIN(add) addl %eax, (v) adcl %edx, 4(v) + +#ifdef CONFIG_PAX_REFCOUNT + jno 0f + subl %eax, (v) + sbbl %edx, 4(v) + int $4 +0: + _ASM_EXTABLE(0b, 0b) +#endif + +RET_ENDP +BEGIN(add_unchecked) + addl %eax, (v) + adcl %edx, 4(v) RET_ENDP #undef v @@ -77,6 +99,24 @@ RET_ENDP BEGIN(add_return) addl (v), %eax adcl 4(v), %edx + 
+#ifdef CONFIG_PAX_REFCOUNT + into +1234: + _ASM_EXTABLE(1234b, 2f) +#endif + + movl %eax, (v) + movl %edx, 4(v) + +#ifdef CONFIG_PAX_REFCOUNT +2: +#endif + +RET_ENDP +BEGIN(add_return_unchecked) + addl (v), %eax + adcl 4(v), %edx movl %eax, (v) movl %edx, 4(v) RET_ENDP @@ -86,6 +126,20 @@ RET_ENDP BEGIN(sub) subl %eax, (v) sbbl %edx, 4(v) + +#ifdef CONFIG_PAX_REFCOUNT + jno 0f + addl %eax, (v) + adcl %edx, 4(v) + int $4 +0: + _ASM_EXTABLE(0b, 0b) +#endif + +RET_ENDP +BEGIN(sub_unchecked) + subl %eax, (v) + sbbl %edx, 4(v) RET_ENDP #undef v @@ -96,6 +150,27 @@ BEGIN(sub_return) sbbl $0, %edx addl (v), %eax adcl 4(v), %edx + +#ifdef CONFIG_PAX_REFCOUNT + into +1234: + _ASM_EXTABLE(1234b, 2f) +#endif + + movl %eax, (v) + movl %edx, 4(v) + +#ifdef CONFIG_PAX_REFCOUNT +2: +#endif + +RET_ENDP +BEGIN(sub_return_unchecked) + negl %edx + negl %eax + sbbl $0, %edx + addl (v), %eax + adcl 4(v), %edx movl %eax, (v) movl %edx, 4(v) RET_ENDP @@ -105,6 +180,20 @@ RET_ENDP BEGIN(inc) addl $1, (v) adcl $0, 4(v) + +#ifdef CONFIG_PAX_REFCOUNT + jno 0f + subl $1, (v) + sbbl $0, 4(v) + int $4 +0: + _ASM_EXTABLE(0b, 0b) +#endif + +RET_ENDP +BEGIN(inc_unchecked) + addl $1, (v) + adcl $0, 4(v) RET_ENDP #undef v @@ -114,6 +203,26 @@ BEGIN(inc_return) movl 4(v), %edx addl $1, %eax adcl $0, %edx + +#ifdef CONFIG_PAX_REFCOUNT + into +1234: + _ASM_EXTABLE(1234b, 2f) +#endif + + movl %eax, (v) + movl %edx, 4(v) + +#ifdef CONFIG_PAX_REFCOUNT +2: +#endif + +RET_ENDP +BEGIN(inc_return_unchecked) + movl (v), %eax + movl 4(v), %edx + addl $1, %eax + adcl $0, %edx movl %eax, (v) movl %edx, 4(v) RET_ENDP @@ -123,6 +232,20 @@ RET_ENDP BEGIN(dec) subl $1, (v) sbbl $0, 4(v) + +#ifdef CONFIG_PAX_REFCOUNT + jno 0f + addl $1, (v) + adcl $0, 4(v) + int $4 +0: + _ASM_EXTABLE(0b, 0b) +#endif + +RET_ENDP +BEGIN(dec_unchecked) + subl $1, (v) + sbbl $0, 4(v) RET_ENDP #undef v @@ -132,6 +255,26 @@ BEGIN(dec_return) movl 4(v), %edx subl $1, %eax sbbl $0, %edx + +#ifdef CONFIG_PAX_REFCOUNT + into +1234: + _ASM_EXTABLE(1234b, 2f) +#endif + + movl %eax, (v) + movl %edx, 4(v) + +#ifdef CONFIG_PAX_REFCOUNT +2: +#endif + +RET_ENDP +BEGIN(dec_return_unchecked) + movl (v), %eax + movl 4(v), %edx + subl $1, %eax + sbbl $0, %edx movl %eax, (v) movl %edx, 4(v) RET_ENDP @@ -143,6 +286,13 @@ BEGIN(add_unless) adcl %edx, %edi addl (v), %eax adcl 4(v), %edx + +#ifdef CONFIG_PAX_REFCOUNT + into +1234: + _ASM_EXTABLE(1234b, 2f) +#endif + cmpl %eax, %esi je 3f 1: @@ -168,6 +318,13 @@ BEGIN(inc_not_zero) 1: addl $1, %eax adcl $0, %edx + +#ifdef CONFIG_PAX_REFCOUNT + into +1234: + _ASM_EXTABLE(1234b, 2f) +#endif + movl %eax, (v) movl %edx, 4(v) movl $1, %eax @@ -186,6 +343,13 @@ BEGIN(dec_if_positive) movl 4(v), %edx subl $1, %eax sbbl $0, %edx + +#ifdef CONFIG_PAX_REFCOUNT + into +1234: + _ASM_EXTABLE(1234b, 1f) +#endif + js 1f movl %eax, (v) movl %edx, 4(v) diff -urNp linux-2.6.39.1/arch/x86/lib/atomic64_cx8_32.S linux-2.6.39.1/arch/x86/lib/atomic64_cx8_32.S --- linux-2.6.39.1/arch/x86/lib/atomic64_cx8_32.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/atomic64_cx8_32.S 2011-05-22 19:36:30.000000000 -0400 @@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8) CFI_ENDPROC ENDPROC(atomic64_read_cx8) +ENTRY(atomic64_read_unchecked_cx8) + CFI_STARTPROC + + read64 %ecx + ret + CFI_ENDPROC +ENDPROC(atomic64_read_unchecked_cx8) + ENTRY(atomic64_set_cx8) CFI_STARTPROC @@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8) CFI_ENDPROC ENDPROC(atomic64_set_cx8) +ENTRY(atomic64_set_unchecked_cx8) + CFI_STARTPROC + +1: +/* we don't need LOCK_PREFIX since aligned 64-bit 
writes + * are atomic on 586 and newer */ + cmpxchg8b (%esi) + jne 1b + + ret + CFI_ENDPROC +ENDPROC(atomic64_set_unchecked_cx8) + ENTRY(atomic64_xchg_cx8) CFI_STARTPROC @@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8) CFI_ENDPROC ENDPROC(atomic64_xchg_cx8) -.macro addsub_return func ins insc -ENTRY(atomic64_\func\()_return_cx8) +.macro addsub_return func ins insc unchecked="" +ENTRY(atomic64_\func\()_return\unchecked\()_cx8) CFI_STARTPROC SAVE ebp SAVE ebx @@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8) movl %edx, %ecx \ins\()l %esi, %ebx \insc\()l %edi, %ecx + +.ifb \unchecked +#ifdef CONFIG_PAX_REFCOUNT + into +2: + _ASM_EXTABLE(2b, 3f) +#endif +.endif + LOCK_PREFIX cmpxchg8b (%ebp) jne 1b - -10: movl %ebx, %eax movl %ecx, %edx + +.ifb \unchecked +#ifdef CONFIG_PAX_REFCOUNT +3: +#endif +.endif + RESTORE edi RESTORE esi RESTORE ebx RESTORE ebp ret CFI_ENDPROC -ENDPROC(atomic64_\func\()_return_cx8) +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8) .endm addsub_return add add adc addsub_return sub sub sbb +addsub_return add add adc _unchecked +addsub_return sub sub sbb _unchecked -.macro incdec_return func ins insc -ENTRY(atomic64_\func\()_return_cx8) +.macro incdec_return func ins insc unchecked +ENTRY(atomic64_\func\()_return\unchecked\()_cx8) CFI_STARTPROC SAVE ebx @@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8) movl %edx, %ecx \ins\()l $1, %ebx \insc\()l $0, %ecx + +.ifb \unchecked +#ifdef CONFIG_PAX_REFCOUNT + into +2: + _ASM_EXTABLE(2b, 3f) +#endif +.endif + LOCK_PREFIX cmpxchg8b (%esi) jne 1b -10: movl %ebx, %eax movl %ecx, %edx + +.ifb \unchecked +#ifdef CONFIG_PAX_REFCOUNT +3: +#endif +.endif + RESTORE ebx ret CFI_ENDPROC -ENDPROC(atomic64_\func\()_return_cx8) +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8) .endm incdec_return inc add adc incdec_return dec sub sbb +incdec_return inc add adc _unchecked +incdec_return dec sub sbb _unchecked ENTRY(atomic64_dec_if_positive_cx8) CFI_STARTPROC @@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8) movl %edx, %ecx subl $1, %ebx sbb $0, %ecx + +#ifdef CONFIG_PAX_REFCOUNT + into +1234: + _ASM_EXTABLE(1234b, 2f) +#endif + js 2f LOCK_PREFIX cmpxchg8b (%esi) @@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8) movl %edx, %ecx addl %esi, %ebx adcl %edi, %ecx + +#ifdef CONFIG_PAX_REFCOUNT + into +1234: + _ASM_EXTABLE(1234b, 3f) +#endif + LOCK_PREFIX cmpxchg8b (%ebp) jne 1b @@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8) movl %edx, %ecx addl $1, %ebx adcl $0, %ecx + +#ifdef CONFIG_PAX_REFCOUNT + into +1234: + _ASM_EXTABLE(1234b, 3f) +#endif + LOCK_PREFIX cmpxchg8b (%esi) jne 1b diff -urNp linux-2.6.39.1/arch/x86/lib/checksum_32.S linux-2.6.39.1/arch/x86/lib/checksum_32.S --- linux-2.6.39.1/arch/x86/lib/checksum_32.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/checksum_32.S 2011-05-22 19:36:30.000000000 -0400 @@ -28,7 +28,8 @@ #include #include #include - +#include + /* * computes a partial checksum, e.g. 
for TCP/UDP fragments */ @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic ( #define ARGBASE 16 #define FP 12 - -ENTRY(csum_partial_copy_generic) + +ENTRY(csum_partial_copy_generic_to_user) CFI_STARTPROC + +#ifdef CONFIG_PAX_MEMORY_UDEREF + pushl_cfi %gs + popl_cfi %es + jmp csum_partial_copy_generic +#endif + +ENTRY(csum_partial_copy_generic_from_user) + +#ifdef CONFIG_PAX_MEMORY_UDEREF + pushl_cfi %gs + popl_cfi %ds +#endif + +ENTRY(csum_partial_copy_generic) subl $4,%esp CFI_ADJUST_CFA_OFFSET 4 pushl_cfi %edi @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic) jmp 4f SRC(1: movw (%esi), %bx ) addl $2, %esi -DST( movw %bx, (%edi) ) +DST( movw %bx, %es:(%edi) ) addl $2, %edi addw %bx, %ax adcl $0, %eax @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) ) SRC(1: movl (%esi), %ebx ) SRC( movl 4(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, (%edi) ) +DST( movl %ebx, %es:(%edi) ) adcl %edx, %eax -DST( movl %edx, 4(%edi) ) +DST( movl %edx, %es:4(%edi) ) SRC( movl 8(%esi), %ebx ) SRC( movl 12(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 8(%edi) ) +DST( movl %ebx, %es:8(%edi) ) adcl %edx, %eax -DST( movl %edx, 12(%edi) ) +DST( movl %edx, %es:12(%edi) ) SRC( movl 16(%esi), %ebx ) SRC( movl 20(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 16(%edi) ) +DST( movl %ebx, %es:16(%edi) ) adcl %edx, %eax -DST( movl %edx, 20(%edi) ) +DST( movl %edx, %es:20(%edi) ) SRC( movl 24(%esi), %ebx ) SRC( movl 28(%esi), %edx ) adcl %ebx, %eax -DST( movl %ebx, 24(%edi) ) +DST( movl %ebx, %es:24(%edi) ) adcl %edx, %eax -DST( movl %edx, 28(%edi) ) +DST( movl %edx, %es:28(%edi) ) lea 32(%esi), %esi lea 32(%edi), %edi @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) ) shrl $2, %edx # This clears CF SRC(3: movl (%esi), %ebx ) adcl %ebx, %eax -DST( movl %ebx, (%edi) ) +DST( movl %ebx, %es:(%edi) ) lea 4(%esi), %esi lea 4(%edi), %edi dec %edx @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) ) jb 5f SRC( movw (%esi), %cx ) leal 2(%esi), %esi -DST( movw %cx, (%edi) ) +DST( movw %cx, %es:(%edi) ) leal 2(%edi), %edi je 6f shll $16,%ecx SRC(5: movb (%esi), %cl ) -DST( movb %cl, (%edi) ) +DST( movb %cl, %es:(%edi) ) 6: addl %ecx, %eax adcl $0, %eax 7: @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) ) 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr - movl $-EFAULT, (%ebx) + movl $-EFAULT, %ss:(%ebx) # zero the complete destination - computing the rest # is too much work @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) ) 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr - movl $-EFAULT,(%ebx) + movl $-EFAULT,%ss:(%ebx) jmp 5000b .previous + pushl_cfi %ss + popl_cfi %ds + pushl_cfi %ss + popl_cfi %es popl_cfi %ebx CFI_RESTORE ebx popl_cfi %esi @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) ) popl_cfi %ecx # equivalent to addl $4,%esp ret CFI_ENDPROC -ENDPROC(csum_partial_copy_generic) +ENDPROC(csum_partial_copy_generic_to_user) #else /* Version for PentiumII/PPro */ #define ROUND1(x) \ + nop; nop; nop; \ SRC(movl x(%esi), %ebx ) ; \ addl %ebx, %eax ; \ - DST(movl %ebx, x(%edi) ) ; + DST(movl %ebx, %es:x(%edi)) ; #define ROUND(x) \ + nop; nop; nop; \ SRC(movl x(%esi), %ebx ) ; \ adcl %ebx, %eax ; \ - DST(movl %ebx, x(%edi) ) ; + DST(movl %ebx, %es:x(%edi)) ; #define ARGBASE 12 - -ENTRY(csum_partial_copy_generic) + +ENTRY(csum_partial_copy_generic_to_user) CFI_STARTPROC + +#ifdef CONFIG_PAX_MEMORY_UDEREF + pushl_cfi %gs + popl_cfi %es + jmp csum_partial_copy_generic +#endif + +ENTRY(csum_partial_copy_generic_from_user) + +#ifdef CONFIG_PAX_MEMORY_UDEREF + pushl_cfi %gs + popl_cfi %ds +#endif + +ENTRY(csum_partial_copy_generic) pushl_cfi %ebx 
CFI_REL_OFFSET ebx, 0 pushl_cfi %edi @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic) subl %ebx, %edi lea -1(%esi),%edx andl $-32,%edx - lea 3f(%ebx,%ebx), %ebx + lea 3f(%ebx,%ebx,2), %ebx testl %esi, %esi jmp *%ebx 1: addl $64,%esi @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic) jb 5f SRC( movw (%esi), %dx ) leal 2(%esi), %esi -DST( movw %dx, (%edi) ) +DST( movw %dx, %es:(%edi) ) leal 2(%edi), %edi je 6f shll $16,%edx 5: SRC( movb (%esi), %dl ) -DST( movb %dl, (%edi) ) +DST( movb %dl, %es:(%edi) ) 6: addl %edx, %eax adcl $0, %eax 7: .section .fixup, "ax" 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr - movl $-EFAULT, (%ebx) + movl $-EFAULT, %ss:(%ebx) # zero the complete destination (computing the rest is too much work) movl ARGBASE+8(%esp),%edi # dst movl ARGBASE+12(%esp),%ecx # len @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) ) rep; stosb jmp 7b 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr - movl $-EFAULT, (%ebx) + movl $-EFAULT, %ss:(%ebx) jmp 7b .previous +#ifdef CONFIG_PAX_MEMORY_UDEREF + pushl_cfi %ss + popl_cfi %ds + pushl_cfi %ss + popl_cfi %es +#endif + popl_cfi %esi CFI_RESTORE esi popl_cfi %edi @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) ) CFI_RESTORE ebx ret CFI_ENDPROC -ENDPROC(csum_partial_copy_generic) +ENDPROC(csum_partial_copy_generic_to_user) #undef ROUND #undef ROUND1 diff -urNp linux-2.6.39.1/arch/x86/lib/clear_page_64.S linux-2.6.39.1/arch/x86/lib/clear_page_64.S --- linux-2.6.39.1/arch/x86/lib/clear_page_64.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/clear_page_64.S 2011-05-22 19:36:30.000000000 -0400 @@ -43,7 +43,7 @@ ENDPROC(clear_page) #include - .section .altinstr_replacement,"ax" + .section .altinstr_replacement,"a" 1: .byte 0xeb /* jmp */ .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */ 2: diff -urNp linux-2.6.39.1/arch/x86/lib/copy_page_64.S linux-2.6.39.1/arch/x86/lib/copy_page_64.S --- linux-2.6.39.1/arch/x86/lib/copy_page_64.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/copy_page_64.S 2011-05-22 19:36:30.000000000 -0400 @@ -104,7 +104,7 @@ ENDPROC(copy_page) #include - .section .altinstr_replacement,"ax" + .section .altinstr_replacement,"a" 1: .byte 0xeb /* jmp */ .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */ 2: diff -urNp linux-2.6.39.1/arch/x86/lib/copy_user_64.S linux-2.6.39.1/arch/x86/lib/copy_user_64.S --- linux-2.6.39.1/arch/x86/lib/copy_user_64.S 2011-06-03 00:04:13.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/copy_user_64.S 2011-06-03 00:32:05.000000000 -0400 @@ -15,13 +15,14 @@ #include #include #include +#include .macro ALTERNATIVE_JUMP feature,orig,alt 0: .byte 0xe9 /* 32bit jump */ .long \orig-1f /* by default jump to orig */ 1: - .section .altinstr_replacement,"ax" + .section .altinstr_replacement,"a" 2: .byte 0xe9 /* near jump with 32bit immediate */ .long \alt-1b /* offset */ /* or alternatively to alt */ .previous @@ -64,37 +65,13 @@ #endif .endm -/* Standard copy_to_user with segment limit checking */ -ENTRY(_copy_to_user) - CFI_STARTPROC - GET_THREAD_INFO(%rax) - movq %rdi,%rcx - addq %rdx,%rcx - jc bad_to_user - cmpq TI_addr_limit(%rax),%rcx - ja bad_to_user - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string - CFI_ENDPROC -ENDPROC(_copy_to_user) - -/* Standard copy_from_user with segment limit checking */ -ENTRY(_copy_from_user) - CFI_STARTPROC - GET_THREAD_INFO(%rax) - movq %rsi,%rcx - addq %rdx,%rcx - jc bad_from_user - cmpq TI_addr_limit(%rax),%rcx - ja bad_from_user - ALTERNATIVE_JUMP 
X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string - CFI_ENDPROC -ENDPROC(_copy_from_user) - .section .fixup,"ax" /* must zero dest */ ENTRY(bad_from_user) bad_from_user: CFI_STARTPROC + testl %edx,%edx + js bad_to_user movl %edx,%ecx xorl %eax,%eax rep diff -urNp linux-2.6.39.1/arch/x86/lib/copy_user_nocache_64.S linux-2.6.39.1/arch/x86/lib/copy_user_nocache_64.S --- linux-2.6.39.1/arch/x86/lib/copy_user_nocache_64.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/copy_user_nocache_64.S 2011-05-22 19:36:30.000000000 -0400 @@ -14,6 +14,7 @@ #include #include #include +#include .macro ALIGN_DESTINATION #ifdef FIX_ALIGNMENT @@ -50,6 +51,15 @@ */ ENTRY(__copy_user_nocache) CFI_STARTPROC + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov $PAX_USER_SHADOW_BASE,%rcx + cmp %rcx,%rsi + jae 1f + add %rcx,%rsi +1: +#endif + cmpl $8,%edx jb 20f /* less then 8 bytes, go to byte copy loop */ ALIGN_DESTINATION diff -urNp linux-2.6.39.1/arch/x86/lib/csum-wrappers_64.c linux-2.6.39.1/arch/x86/lib/csum-wrappers_64.c --- linux-2.6.39.1/arch/x86/lib/csum-wrappers_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/csum-wrappers_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _ len -= 2; } } + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; +#endif + isum = csum_partial_copy_generic((__force const void *)src, dst, len, isum, errp, NULL); if (unlikely(*errp)) @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr } *errp = 0; + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if ((unsigned long)dst < PAX_USER_SHADOW_BASE) + dst += PAX_USER_SHADOW_BASE; +#endif + return csum_partial_copy_generic(src, (void __force *)dst, len, isum, NULL, errp); } diff -urNp linux-2.6.39.1/arch/x86/lib/getuser.S linux-2.6.39.1/arch/x86/lib/getuser.S --- linux-2.6.39.1/arch/x86/lib/getuser.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/getuser.S 2011-05-22 19:36:30.000000000 -0400 @@ -33,14 +33,35 @@ #include #include #include +#include +#include + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define __copyuser_seg gs; +#else +#define __copyuser_seg +#endif .text ENTRY(__get_user_1) CFI_STARTPROC + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) GET_THREAD_INFO(%_ASM_DX) cmp TI_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user -1: movzb (%_ASM_AX),%edx + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_DX + cmp %_ASM_DX,%_ASM_AX + jae 1234f + add %_ASM_DX,%_ASM_AX +1234: +#endif + +#endif + +1: __copyuser_seg movzb (%_ASM_AX),%edx xor %eax,%eax ret CFI_ENDPROC @@ -49,11 +70,24 @@ ENDPROC(__get_user_1) ENTRY(__get_user_2) CFI_STARTPROC add $1,%_ASM_AX + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) jc bad_get_user GET_THREAD_INFO(%_ASM_DX) cmp TI_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user -2: movzwl -1(%_ASM_AX),%edx + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_DX + cmp %_ASM_DX,%_ASM_AX + jae 1234f + add %_ASM_DX,%_ASM_AX +1234: +#endif + +#endif + +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx xor %eax,%eax ret CFI_ENDPROC @@ -62,11 +96,24 @@ ENDPROC(__get_user_2) ENTRY(__get_user_4) CFI_STARTPROC add $3,%_ASM_AX + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) jc bad_get_user GET_THREAD_INFO(%_ASM_DX) cmp TI_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user -3: mov 
-3(%_ASM_AX),%edx + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_DX + cmp %_ASM_DX,%_ASM_AX + jae 1234f + add %_ASM_DX,%_ASM_AX +1234: +#endif + +#endif + +3: __copyuser_seg mov -3(%_ASM_AX),%edx xor %eax,%eax ret CFI_ENDPROC @@ -80,6 +127,15 @@ ENTRY(__get_user_8) GET_THREAD_INFO(%_ASM_DX) cmp TI_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user + +#ifdef CONFIG_PAX_MEMORY_UDEREF + mov $PAX_USER_SHADOW_BASE,%_ASM_DX + cmp %_ASM_DX,%_ASM_AX + jae 1234f + add %_ASM_DX,%_ASM_AX +1234: +#endif + 4: movq -7(%_ASM_AX),%_ASM_DX xor %eax,%eax ret diff -urNp linux-2.6.39.1/arch/x86/lib/insn.c linux-2.6.39.1/arch/x86/lib/insn.c --- linux-2.6.39.1/arch/x86/lib/insn.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/insn.c 2011-05-22 19:36:30.000000000 -0400 @@ -21,6 +21,11 @@ #include #include #include +#ifdef __KERNEL__ +#include +#else +#define ktla_ktva(addr) addr +#endif #define get_next(t, insn) \ ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) @@ -40,8 +45,8 @@ void insn_init(struct insn *insn, const void *kaddr, int x86_64) { memset(insn, 0, sizeof(*insn)); - insn->kaddr = kaddr; - insn->next_byte = kaddr; + insn->kaddr = ktla_ktva(kaddr); + insn->next_byte = ktla_ktva(kaddr); insn->x86_64 = x86_64 ? 1 : 0; insn->opnd_bytes = 4; if (x86_64) diff -urNp linux-2.6.39.1/arch/x86/lib/mmx_32.c linux-2.6.39.1/arch/x86/lib/mmx_32.c --- linux-2.6.39.1/arch/x86/lib/mmx_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/mmx_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void * { void *p; int i; + unsigned long cr0; if (unlikely(in_interrupt())) return __memcpy(to, from, len); @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void * kernel_fpu_begin(); __asm__ __volatile__ ( - "1: prefetch (%0)\n" /* This set is 28 bytes */ - " prefetch 64(%0)\n" - " prefetch 128(%0)\n" - " prefetch 192(%0)\n" - " prefetch 256(%0)\n" + "1: prefetch (%1)\n" /* This set is 28 bytes */ + " prefetch 64(%1)\n" + " prefetch 128(%1)\n" + " prefetch 192(%1)\n" + " prefetch 256(%1)\n" "2: \n" ".section .fixup, \"ax\"\n" - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + "3: \n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) - : : "r" (from)); + : "=&r" (cr0) : "r" (from) : "ax"); for ( ; i > 5; i--) { __asm__ __volatile__ ( - "1: prefetch 320(%0)\n" - "2: movq (%0), %%mm0\n" - " movq 8(%0), %%mm1\n" - " movq 16(%0), %%mm2\n" - " movq 24(%0), %%mm3\n" - " movq %%mm0, (%1)\n" - " movq %%mm1, 8(%1)\n" - " movq %%mm2, 16(%1)\n" - " movq %%mm3, 24(%1)\n" - " movq 32(%0), %%mm0\n" - " movq 40(%0), %%mm1\n" - " movq 48(%0), %%mm2\n" - " movq 56(%0), %%mm3\n" - " movq %%mm0, 32(%1)\n" - " movq %%mm1, 40(%1)\n" - " movq %%mm2, 48(%1)\n" - " movq %%mm3, 56(%1)\n" + "1: prefetch 320(%1)\n" + "2: movq (%1), %%mm0\n" + " movq 8(%1), %%mm1\n" + " movq 16(%1), %%mm2\n" + " movq 24(%1), %%mm3\n" + " movq %%mm0, (%2)\n" + " movq %%mm1, 8(%2)\n" + " movq %%mm2, 16(%2)\n" + " movq %%mm3, 24(%2)\n" + " movq 32(%1), %%mm0\n" + " movq 40(%1), %%mm1\n" + " movq 48(%1), %%mm2\n" + " movq 56(%1), %%mm3\n" + " movq %%mm0, 32(%2)\n" + " movq %%mm1, 40(%2)\n" + " movq %%mm2, 48(%2)\n" + " movq %%mm3, 56(%2)\n" ".section .fixup, \"ax\"\n" - "3: movw 
$0x05EB, 1b\n" /* jmp on 5 bytes */ + "3:\n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) - : : "r" (from), "r" (to) : "memory"); + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); from += 64; to += 64; @@ -158,6 +187,7 @@ static void fast_clear_page(void *page) static void fast_copy_page(void *to, void *from) { int i; + unsigned long cr0; kernel_fpu_begin(); @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi * but that is for later. -AV */ __asm__ __volatile__( - "1: prefetch (%0)\n" - " prefetch 64(%0)\n" - " prefetch 128(%0)\n" - " prefetch 192(%0)\n" - " prefetch 256(%0)\n" + "1: prefetch (%1)\n" + " prefetch 64(%1)\n" + " prefetch 128(%1)\n" + " prefetch 192(%1)\n" + " prefetch 256(%1)\n" "2: \n" ".section .fixup, \"ax\"\n" - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + "3: \n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b, 3b) : : "r" (from)); + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); for (i = 0; i < (4096-320)/64; i++) { __asm__ __volatile__ ( - "1: prefetch 320(%0)\n" - "2: movq (%0), %%mm0\n" - " movntq %%mm0, (%1)\n" - " movq 8(%0), %%mm1\n" - " movntq %%mm1, 8(%1)\n" - " movq 16(%0), %%mm2\n" - " movntq %%mm2, 16(%1)\n" - " movq 24(%0), %%mm3\n" - " movntq %%mm3, 24(%1)\n" - " movq 32(%0), %%mm4\n" - " movntq %%mm4, 32(%1)\n" - " movq 40(%0), %%mm5\n" - " movntq %%mm5, 40(%1)\n" - " movq 48(%0), %%mm6\n" - " movntq %%mm6, 48(%1)\n" - " movq 56(%0), %%mm7\n" - " movntq %%mm7, 56(%1)\n" + "1: prefetch 320(%1)\n" + "2: movq (%1), %%mm0\n" + " movntq %%mm0, (%2)\n" + " movq 8(%1), %%mm1\n" + " movntq %%mm1, 8(%2)\n" + " movq 16(%1), %%mm2\n" + " movntq %%mm2, 16(%2)\n" + " movq 24(%1), %%mm3\n" + " movntq %%mm3, 24(%2)\n" + " movq 32(%1), %%mm4\n" + " movntq %%mm4, 32(%2)\n" + " movq 40(%1), %%mm5\n" + " movntq %%mm5, 40(%2)\n" + " movq 48(%1), %%mm6\n" + " movntq %%mm6, 48(%2)\n" + " movq 56(%1), %%mm7\n" + " movntq %%mm7, 56(%2)\n" ".section .fixup, \"ax\"\n" - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + "3:\n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); from += 64; to += 64; @@ -280,47 +338,76 @@ static void fast_clear_page(void *page) static void fast_copy_page(void *to, void *from) { int i; + unsigned long cr0; kernel_fpu_begin(); __asm__ __volatile__ ( - "1: prefetch (%0)\n" - " prefetch 64(%0)\n" - " prefetch 128(%0)\n" - " prefetch 192(%0)\n" - " prefetch 256(%0)\n" + "1: prefetch (%1)\n" + " prefetch 64(%1)\n" + " prefetch 128(%1)\n" + " prefetch 192(%1)\n" + " prefetch 256(%1)\n" "2: \n" ".section .fixup, \"ax\"\n" - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + "3: \n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl 
%%eax, %%cr0\n" +#endif + + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" - _ASM_EXTABLE(1b, 3b) : : "r" (from)); + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); for (i = 0; i < 4096/64; i++) { __asm__ __volatile__ ( - "1: prefetch 320(%0)\n" - "2: movq (%0), %%mm0\n" - " movq 8(%0), %%mm1\n" - " movq 16(%0), %%mm2\n" - " movq 24(%0), %%mm3\n" - " movq %%mm0, (%1)\n" - " movq %%mm1, 8(%1)\n" - " movq %%mm2, 16(%1)\n" - " movq %%mm3, 24(%1)\n" - " movq 32(%0), %%mm0\n" - " movq 40(%0), %%mm1\n" - " movq 48(%0), %%mm2\n" - " movq 56(%0), %%mm3\n" - " movq %%mm0, 32(%1)\n" - " movq %%mm1, 40(%1)\n" - " movq %%mm2, 48(%1)\n" - " movq %%mm3, 56(%1)\n" + "1: prefetch 320(%1)\n" + "2: movq (%1), %%mm0\n" + " movq 8(%1), %%mm1\n" + " movq 16(%1), %%mm2\n" + " movq 24(%1), %%mm3\n" + " movq %%mm0, (%2)\n" + " movq %%mm1, 8(%2)\n" + " movq %%mm2, 16(%2)\n" + " movq %%mm3, 24(%2)\n" + " movq 32(%1), %%mm0\n" + " movq 40(%1), %%mm1\n" + " movq 48(%1), %%mm2\n" + " movq 56(%1), %%mm3\n" + " movq %%mm0, 32(%2)\n" + " movq %%mm1, 40(%2)\n" + " movq %%mm2, 48(%2)\n" + " movq %%mm3, 56(%2)\n" ".section .fixup, \"ax\"\n" - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + "3:\n" + +#ifdef CONFIG_PAX_KERNEXEC + " movl %%cr0, %0\n" + " movl %0, %%eax\n" + " andl $0xFFFEFFFF, %%eax\n" + " movl %%eax, %%cr0\n" +#endif + + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ + +#ifdef CONFIG_PAX_KERNEXEC + " movl %0, %%cr0\n" +#endif + " jmp 2b\n" ".previous\n" _ASM_EXTABLE(1b, 3b) - : : "r" (from), "r" (to) : "memory"); + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); from += 64; to += 64; diff -urNp linux-2.6.39.1/arch/x86/lib/putuser.S linux-2.6.39.1/arch/x86/lib/putuser.S --- linux-2.6.39.1/arch/x86/lib/putuser.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/putuser.S 2011-05-22 19:36:30.000000000 -0400 @@ -15,7 +15,8 @@ #include #include #include - +#include +#include /* * __put_user_X @@ -29,52 +30,119 @@ * as they get called from within inline assembly. 
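The mmx_32.c fixup paths above all follow the same KERNEXEC CR0 dance: the current CR0 value is saved, bit 16 (CR0.WP, the bit cleared by the 0xFFFEFFFF mask) is cleared so otherwise write-protected kernel text can be patched, and the saved value is written back afterwards. Stripped of the privileged register access, the bit manipulation amounts to the sketch below; the starting value is only an example.

#include <stdint.h>
#include <stdio.h>

#define X86_CR0_WP (1u << 16)       /* write-protect bit; 0xFFFEFFFF in the patch is its complement */

int main(void)
{
    uint32_t cr0 = 0x80050033u;     /* example CR0 value with WP (bit 16) set */
    uint32_t saved = cr0;           /* "movl %%cr0, %0" saves the old value   */

    cr0 &= ~X86_CR0_WP;             /* "andl $0xFFFEFFFF, %%eax" clears WP    */
    printf("WP cleared:  %#010x\n", cr0);

    /* ...the self-patching write to the fixup jmp would happen here... */

    cr0 = saved;                    /* "movl %0, %%cr0" restores protection   */
    printf("WP restored: %#010x\n", cr0);
    return 0;
}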
*/ -#define ENTER CFI_STARTPROC ; \ - GET_THREAD_INFO(%_ASM_BX) +#define ENTER CFI_STARTPROC #define EXIT ret ; \ CFI_ENDPROC +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define _DEST %_ASM_CX,%_ASM_BX +#else +#define _DEST %_ASM_CX +#endif + +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) +#define __copyuser_seg gs; +#else +#define __copyuser_seg +#endif + .text ENTRY(__put_user_1) ENTER + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + GET_THREAD_INFO(%_ASM_BX) cmp TI_addr_limit(%_ASM_BX),%_ASM_CX jae bad_put_user -1: movb %al,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jb 1234f + xor %ebx,%ebx +1234: +#endif + +#endif + +1: __copyuser_seg movb %al,(_DEST) xor %eax,%eax EXIT ENDPROC(__put_user_1) ENTRY(__put_user_2) ENTER + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + GET_THREAD_INFO(%_ASM_BX) mov TI_addr_limit(%_ASM_BX),%_ASM_BX sub $1,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user -2: movw %ax,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jb 1234f + xor %ebx,%ebx +1234: +#endif + +#endif + +2: __copyuser_seg movw %ax,(_DEST) xor %eax,%eax EXIT ENDPROC(__put_user_2) ENTRY(__put_user_4) ENTER + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + GET_THREAD_INFO(%_ASM_BX) mov TI_addr_limit(%_ASM_BX),%_ASM_BX sub $3,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user -3: movl %eax,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jb 1234f + xor %ebx,%ebx +1234: +#endif + +#endif + +3: __copyuser_seg movl %eax,(_DEST) xor %eax,%eax EXIT ENDPROC(__put_user_4) ENTRY(__put_user_8) ENTER + +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF) + GET_THREAD_INFO(%_ASM_BX) mov TI_addr_limit(%_ASM_BX),%_ASM_BX sub $7,%_ASM_BX cmp %_ASM_BX,%_ASM_CX jae bad_put_user -4: mov %_ASM_AX,(%_ASM_CX) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + mov $PAX_USER_SHADOW_BASE,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jb 1234f + xor %ebx,%ebx +1234: +#endif + +#endif + +4: __copyuser_seg mov %_ASM_AX,(_DEST) #ifdef CONFIG_X86_32 -5: movl %edx,4(%_ASM_CX) +5: __copyuser_seg movl %edx,4(_DEST) #endif xor %eax,%eax EXIT diff -urNp linux-2.6.39.1/arch/x86/lib/usercopy_32.c linux-2.6.39.1/arch/x86/lib/usercopy_32.c --- linux-2.6.39.1/arch/x86/lib/usercopy_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/usercopy_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -43,7 +43,7 @@ do { \ __asm__ __volatile__( \ " testl %1,%1\n" \ " jz 2f\n" \ - "0: lodsb\n" \ + "0: "__copyuser_seg"lodsb\n" \ " stosb\n" \ " testb %%al,%%al\n" \ " jz 1f\n" \ @@ -128,10 +128,12 @@ do { \ int __d0; \ might_fault(); \ __asm__ __volatile__( \ + __COPYUSER_SET_ES \ "0: rep; stosl\n" \ " movl %2,%0\n" \ "1: rep; stosb\n" \ "2:\n" \ + __COPYUSER_RESTORE_ES \ ".section .fixup,\"ax\"\n" \ "3: lea 0(%2,%0,4),%0\n" \ " jmp 2b\n" \ @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, might_fault(); __asm__ __volatile__( + __COPYUSER_SET_ES " testl %0, %0\n" " jz 3f\n" " andl %0,%%ecx\n" @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, " subl %%ecx,%0\n" " addl %0,%%eax\n" "1:\n" + __COPYUSER_RESTORE_ES ".section .fixup,\"ax\"\n" "2: xorl %%eax,%%eax\n" " jmp 1b\n" @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user); 
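The getuser.S and putuser.S hunks above, like the csum wrapper and usercopy_64.c changes elsewhere in this patch, apply one recurring UDEREF translation: a user pointer below PAX_USER_SHADOW_BASE is shifted up by that base before the access, while addresses already at or above it pass through unchanged (getuser.S spells this as cmp/jae/add; putuser.S folds the base into the addressing mode instead). A stripped-down C model of that check is below; the base constant here is purely illustrative, the real one is an architecture-specific kernel constant.

#include <stdint.h>
#include <stdio.h>

/* Purely illustrative value; the real PAX_USER_SHADOW_BASE differs. */
#define SHADOW_BASE 0x100000000ULL

/*
 * Mirror of the check emitted in the patched __get_user_* and
 * __put_user_* stubs: low (userland) addresses are redirected into the
 * shadow mapping, high addresses are left untouched.
 */
static uint64_t shadow_translate(uint64_t uaddr)
{
    if (uaddr < SHADOW_BASE)
        uaddr += SHADOW_BASE;
    return uaddr;
}

int main(void)
{
    printf("%#llx\n", (unsigned long long)shadow_translate(0x7fff1000ULL));
    printf("%#llx\n", (unsigned long long)shadow_translate(0x200000000ULL));
    return 0;
}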
#ifdef CONFIG_X86_INTEL_USERCOPY static unsigned long -__copy_user_intel(void __user *to, const void *from, unsigned long size) +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size) { int d0, d1; __asm__ __volatile__( @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const " .align 2,0x90\n" "3: movl 0(%4), %%eax\n" "4: movl 4(%4), %%edx\n" - "5: movl %%eax, 0(%3)\n" - "6: movl %%edx, 4(%3)\n" + "5: "__copyuser_seg" movl %%eax, 0(%3)\n" + "6: "__copyuser_seg" movl %%edx, 4(%3)\n" "7: movl 8(%4), %%eax\n" "8: movl 12(%4),%%edx\n" - "9: movl %%eax, 8(%3)\n" - "10: movl %%edx, 12(%3)\n" + "9: "__copyuser_seg" movl %%eax, 8(%3)\n" + "10: "__copyuser_seg" movl %%edx, 12(%3)\n" "11: movl 16(%4), %%eax\n" "12: movl 20(%4), %%edx\n" - "13: movl %%eax, 16(%3)\n" - "14: movl %%edx, 20(%3)\n" + "13: "__copyuser_seg" movl %%eax, 16(%3)\n" + "14: "__copyuser_seg" movl %%edx, 20(%3)\n" "15: movl 24(%4), %%eax\n" "16: movl 28(%4), %%edx\n" - "17: movl %%eax, 24(%3)\n" - "18: movl %%edx, 28(%3)\n" + "17: "__copyuser_seg" movl %%eax, 24(%3)\n" + "18: "__copyuser_seg" movl %%edx, 28(%3)\n" "19: movl 32(%4), %%eax\n" "20: movl 36(%4), %%edx\n" - "21: movl %%eax, 32(%3)\n" - "22: movl %%edx, 36(%3)\n" + "21: "__copyuser_seg" movl %%eax, 32(%3)\n" + "22: "__copyuser_seg" movl %%edx, 36(%3)\n" "23: movl 40(%4), %%eax\n" "24: movl 44(%4), %%edx\n" - "25: movl %%eax, 40(%3)\n" - "26: movl %%edx, 44(%3)\n" + "25: "__copyuser_seg" movl %%eax, 40(%3)\n" + "26: "__copyuser_seg" movl %%edx, 44(%3)\n" "27: movl 48(%4), %%eax\n" "28: movl 52(%4), %%edx\n" - "29: movl %%eax, 48(%3)\n" - "30: movl %%edx, 52(%3)\n" + "29: "__copyuser_seg" movl %%eax, 48(%3)\n" + "30: "__copyuser_seg" movl %%edx, 52(%3)\n" "31: movl 56(%4), %%eax\n" "32: movl 60(%4), %%edx\n" - "33: movl %%eax, 56(%3)\n" - "34: movl %%edx, 60(%3)\n" + "33: "__copyuser_seg" movl %%eax, 56(%3)\n" + "34: "__copyuser_seg" movl %%edx, 60(%3)\n" " addl $-64, %0\n" " addl $64, %4\n" " addl $64, %3\n" @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" + __COPYUSER_SET_ES "99: rep; movsl\n" "36: movl %%eax, %0\n" "37: rep; movsb\n" "100:\n" + __COPYUSER_RESTORE_ES + ".section .fixup,\"ax\"\n" + "101: lea 0(%%eax,%0,4),%0\n" + " jmp 100b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .align 4\n" + " .long 1b,100b\n" + " .long 2b,100b\n" + " .long 3b,100b\n" + " .long 4b,100b\n" + " .long 5b,100b\n" + " .long 6b,100b\n" + " .long 7b,100b\n" + " .long 8b,100b\n" + " .long 9b,100b\n" + " .long 10b,100b\n" + " .long 11b,100b\n" + " .long 12b,100b\n" + " .long 13b,100b\n" + " .long 14b,100b\n" + " .long 15b,100b\n" + " .long 16b,100b\n" + " .long 17b,100b\n" + " .long 18b,100b\n" + " .long 19b,100b\n" + " .long 20b,100b\n" + " .long 21b,100b\n" + " .long 22b,100b\n" + " .long 23b,100b\n" + " .long 24b,100b\n" + " .long 25b,100b\n" + " .long 26b,100b\n" + " .long 27b,100b\n" + " .long 28b,100b\n" + " .long 29b,100b\n" + " .long 30b,100b\n" + " .long 31b,100b\n" + " .long 32b,100b\n" + " .long 33b,100b\n" + " .long 34b,100b\n" + " .long 35b,100b\n" + " .long 36b,100b\n" + " .long 37b,100b\n" + " .long 99b,101b\n" + ".previous" + : "=&c"(size), "=&D" (d0), "=&S" (d1) + : "1"(to), "2"(from), "0"(size) + : "eax", "edx", "memory"); + return size; +} + +static unsigned long +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size) +{ + int d0, d1; + __asm__ __volatile__( + " .align 2,0x90\n" + "1: "__copyuser_seg" movl 32(%4), %%eax\n" + " cmpl 
$67, %0\n" + " jbe 3f\n" + "2: "__copyuser_seg" movl 64(%4), %%eax\n" + " .align 2,0x90\n" + "3: "__copyuser_seg" movl 0(%4), %%eax\n" + "4: "__copyuser_seg" movl 4(%4), %%edx\n" + "5: movl %%eax, 0(%3)\n" + "6: movl %%edx, 4(%3)\n" + "7: "__copyuser_seg" movl 8(%4), %%eax\n" + "8: "__copyuser_seg" movl 12(%4),%%edx\n" + "9: movl %%eax, 8(%3)\n" + "10: movl %%edx, 12(%3)\n" + "11: "__copyuser_seg" movl 16(%4), %%eax\n" + "12: "__copyuser_seg" movl 20(%4), %%edx\n" + "13: movl %%eax, 16(%3)\n" + "14: movl %%edx, 20(%3)\n" + "15: "__copyuser_seg" movl 24(%4), %%eax\n" + "16: "__copyuser_seg" movl 28(%4), %%edx\n" + "17: movl %%eax, 24(%3)\n" + "18: movl %%edx, 28(%3)\n" + "19: "__copyuser_seg" movl 32(%4), %%eax\n" + "20: "__copyuser_seg" movl 36(%4), %%edx\n" + "21: movl %%eax, 32(%3)\n" + "22: movl %%edx, 36(%3)\n" + "23: "__copyuser_seg" movl 40(%4), %%eax\n" + "24: "__copyuser_seg" movl 44(%4), %%edx\n" + "25: movl %%eax, 40(%3)\n" + "26: movl %%edx, 44(%3)\n" + "27: "__copyuser_seg" movl 48(%4), %%eax\n" + "28: "__copyuser_seg" movl 52(%4), %%edx\n" + "29: movl %%eax, 48(%3)\n" + "30: movl %%edx, 52(%3)\n" + "31: "__copyuser_seg" movl 56(%4), %%eax\n" + "32: "__copyuser_seg" movl 60(%4), %%edx\n" + "33: movl %%eax, 56(%3)\n" + "34: movl %%edx, 60(%3)\n" + " addl $-64, %0\n" + " addl $64, %4\n" + " addl $64, %3\n" + " cmpl $63, %0\n" + " ja 1b\n" + "35: movl %0, %%eax\n" + " shrl $2, %0\n" + " andl $3, %%eax\n" + " cld\n" + "99: rep; "__copyuser_seg" movsl\n" + "36: movl %%eax, %0\n" + "37: rep; "__copyuser_seg" movsb\n" + "100:\n" ".section .fixup,\"ax\"\n" "101: lea 0(%%eax,%0,4),%0\n" " jmp 100b\n" @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons int d0, d1; __asm__ __volatile__( " .align 2,0x90\n" - "0: movl 32(%4), %%eax\n" + "0: "__copyuser_seg" movl 32(%4), %%eax\n" " cmpl $67, %0\n" " jbe 2f\n" - "1: movl 64(%4), %%eax\n" + "1: "__copyuser_seg" movl 64(%4), %%eax\n" " .align 2,0x90\n" - "2: movl 0(%4), %%eax\n" - "21: movl 4(%4), %%edx\n" + "2: "__copyuser_seg" movl 0(%4), %%eax\n" + "21: "__copyuser_seg" movl 4(%4), %%edx\n" " movl %%eax, 0(%3)\n" " movl %%edx, 4(%3)\n" - "3: movl 8(%4), %%eax\n" - "31: movl 12(%4),%%edx\n" + "3: "__copyuser_seg" movl 8(%4), %%eax\n" + "31: "__copyuser_seg" movl 12(%4),%%edx\n" " movl %%eax, 8(%3)\n" " movl %%edx, 12(%3)\n" - "4: movl 16(%4), %%eax\n" - "41: movl 20(%4), %%edx\n" + "4: "__copyuser_seg" movl 16(%4), %%eax\n" + "41: "__copyuser_seg" movl 20(%4), %%edx\n" " movl %%eax, 16(%3)\n" " movl %%edx, 20(%3)\n" - "10: movl 24(%4), %%eax\n" - "51: movl 28(%4), %%edx\n" + "10: "__copyuser_seg" movl 24(%4), %%eax\n" + "51: "__copyuser_seg" movl 28(%4), %%edx\n" " movl %%eax, 24(%3)\n" " movl %%edx, 28(%3)\n" - "11: movl 32(%4), %%eax\n" - "61: movl 36(%4), %%edx\n" + "11: "__copyuser_seg" movl 32(%4), %%eax\n" + "61: "__copyuser_seg" movl 36(%4), %%edx\n" " movl %%eax, 32(%3)\n" " movl %%edx, 36(%3)\n" - "12: movl 40(%4), %%eax\n" - "71: movl 44(%4), %%edx\n" + "12: "__copyuser_seg" movl 40(%4), %%eax\n" + "71: "__copyuser_seg" movl 44(%4), %%edx\n" " movl %%eax, 40(%3)\n" " movl %%edx, 44(%3)\n" - "13: movl 48(%4), %%eax\n" - "81: movl 52(%4), %%edx\n" + "13: "__copyuser_seg" movl 48(%4), %%eax\n" + "81: "__copyuser_seg" movl 52(%4), %%edx\n" " movl %%eax, 48(%3)\n" " movl %%edx, 52(%3)\n" - "14: movl 56(%4), %%eax\n" - "91: movl 60(%4), %%edx\n" + "14: "__copyuser_seg" movl 56(%4), %%eax\n" + "91: "__copyuser_seg" movl 60(%4), %%edx\n" " movl %%eax, 56(%3)\n" " movl %%edx, 60(%3)\n" " addl $-64, %0\n" @@ -385,9 +498,9 @@ 
__copy_user_zeroing_intel(void *to, cons " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" - "6: rep; movsl\n" + "6: rep; "__copyuser_seg" movsl\n" " movl %%eax,%0\n" - "7: rep; movsb\n" + "7: rep; "__copyuser_seg" movsb\n" "8:\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing __asm__ __volatile__( " .align 2,0x90\n" - "0: movl 32(%4), %%eax\n" + "0: "__copyuser_seg" movl 32(%4), %%eax\n" " cmpl $67, %0\n" " jbe 2f\n" - "1: movl 64(%4), %%eax\n" + "1: "__copyuser_seg" movl 64(%4), %%eax\n" " .align 2,0x90\n" - "2: movl 0(%4), %%eax\n" - "21: movl 4(%4), %%edx\n" + "2: "__copyuser_seg" movl 0(%4), %%eax\n" + "21: "__copyuser_seg" movl 4(%4), %%edx\n" " movnti %%eax, 0(%3)\n" " movnti %%edx, 4(%3)\n" - "3: movl 8(%4), %%eax\n" - "31: movl 12(%4),%%edx\n" + "3: "__copyuser_seg" movl 8(%4), %%eax\n" + "31: "__copyuser_seg" movl 12(%4),%%edx\n" " movnti %%eax, 8(%3)\n" " movnti %%edx, 12(%3)\n" - "4: movl 16(%4), %%eax\n" - "41: movl 20(%4), %%edx\n" + "4: "__copyuser_seg" movl 16(%4), %%eax\n" + "41: "__copyuser_seg" movl 20(%4), %%edx\n" " movnti %%eax, 16(%3)\n" " movnti %%edx, 20(%3)\n" - "10: movl 24(%4), %%eax\n" - "51: movl 28(%4), %%edx\n" + "10: "__copyuser_seg" movl 24(%4), %%eax\n" + "51: "__copyuser_seg" movl 28(%4), %%edx\n" " movnti %%eax, 24(%3)\n" " movnti %%edx, 28(%3)\n" - "11: movl 32(%4), %%eax\n" - "61: movl 36(%4), %%edx\n" + "11: "__copyuser_seg" movl 32(%4), %%eax\n" + "61: "__copyuser_seg" movl 36(%4), %%edx\n" " movnti %%eax, 32(%3)\n" " movnti %%edx, 36(%3)\n" - "12: movl 40(%4), %%eax\n" - "71: movl 44(%4), %%edx\n" + "12: "__copyuser_seg" movl 40(%4), %%eax\n" + "71: "__copyuser_seg" movl 44(%4), %%edx\n" " movnti %%eax, 40(%3)\n" " movnti %%edx, 44(%3)\n" - "13: movl 48(%4), %%eax\n" - "81: movl 52(%4), %%edx\n" + "13: "__copyuser_seg" movl 48(%4), %%eax\n" + "81: "__copyuser_seg" movl 52(%4), %%edx\n" " movnti %%eax, 48(%3)\n" " movnti %%edx, 52(%3)\n" - "14: movl 56(%4), %%eax\n" - "91: movl 60(%4), %%edx\n" + "14: "__copyuser_seg" movl 56(%4), %%eax\n" + "91: "__copyuser_seg" movl 60(%4), %%edx\n" " movnti %%eax, 56(%3)\n" " movnti %%edx, 60(%3)\n" " addl $-64, %0\n" @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" - "6: rep; movsl\n" + "6: rep; "__copyuser_seg" movsl\n" " movl %%eax,%0\n" - "7: rep; movsb\n" + "7: rep; "__copyuser_seg" movsb\n" "8:\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n __asm__ __volatile__( " .align 2,0x90\n" - "0: movl 32(%4), %%eax\n" + "0: "__copyuser_seg" movl 32(%4), %%eax\n" " cmpl $67, %0\n" " jbe 2f\n" - "1: movl 64(%4), %%eax\n" + "1: "__copyuser_seg" movl 64(%4), %%eax\n" " .align 2,0x90\n" - "2: movl 0(%4), %%eax\n" - "21: movl 4(%4), %%edx\n" + "2: "__copyuser_seg" movl 0(%4), %%eax\n" + "21: "__copyuser_seg" movl 4(%4), %%edx\n" " movnti %%eax, 0(%3)\n" " movnti %%edx, 4(%3)\n" - "3: movl 8(%4), %%eax\n" - "31: movl 12(%4),%%edx\n" + "3: "__copyuser_seg" movl 8(%4), %%eax\n" + "31: "__copyuser_seg" movl 12(%4),%%edx\n" " movnti %%eax, 8(%3)\n" " movnti %%edx, 12(%3)\n" - "4: movl 16(%4), %%eax\n" - "41: movl 20(%4), %%edx\n" + "4: "__copyuser_seg" movl 16(%4), %%eax\n" + "41: "__copyuser_seg" movl 20(%4), %%edx\n" " movnti %%eax, 16(%3)\n" " movnti %%edx, 20(%3)\n" - "10: movl 24(%4), %%eax\n" - "51: movl 28(%4), %%edx\n" + "10: "__copyuser_seg" movl 24(%4), %%eax\n" + "51: "__copyuser_seg" movl 28(%4), %%edx\n" " movnti %%eax, 
24(%3)\n" " movnti %%edx, 28(%3)\n" - "11: movl 32(%4), %%eax\n" - "61: movl 36(%4), %%edx\n" + "11: "__copyuser_seg" movl 32(%4), %%eax\n" + "61: "__copyuser_seg" movl 36(%4), %%edx\n" " movnti %%eax, 32(%3)\n" " movnti %%edx, 36(%3)\n" - "12: movl 40(%4), %%eax\n" - "71: movl 44(%4), %%edx\n" + "12: "__copyuser_seg" movl 40(%4), %%eax\n" + "71: "__copyuser_seg" movl 44(%4), %%edx\n" " movnti %%eax, 40(%3)\n" " movnti %%edx, 44(%3)\n" - "13: movl 48(%4), %%eax\n" - "81: movl 52(%4), %%edx\n" + "13: "__copyuser_seg" movl 48(%4), %%eax\n" + "81: "__copyuser_seg" movl 52(%4), %%edx\n" " movnti %%eax, 48(%3)\n" " movnti %%edx, 52(%3)\n" - "14: movl 56(%4), %%eax\n" - "91: movl 60(%4), %%edx\n" + "14: "__copyuser_seg" movl 56(%4), %%eax\n" + "91: "__copyuser_seg" movl 60(%4), %%edx\n" " movnti %%eax, 56(%3)\n" " movnti %%edx, 60(%3)\n" " addl $-64, %0\n" @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n " shrl $2, %0\n" " andl $3, %%eax\n" " cld\n" - "6: rep; movsl\n" + "6: rep; "__copyuser_seg" movsl\n" " movl %%eax,%0\n" - "7: rep; movsb\n" + "7: rep; "__copyuser_seg" movsb\n" "8:\n" ".section .fixup,\"ax\"\n" "9: lea 0(%%eax,%0,4),%0\n" @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n */ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size); -unsigned long __copy_user_intel(void __user *to, const void *from, +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from, + unsigned long size); +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size); unsigned long __copy_user_zeroing_intel_nocache(void *to, const void __user *from, unsigned long size); #endif /* CONFIG_X86_INTEL_USERCOPY */ /* Generic arbitrary sized copy. */ -#define __copy_user(to, from, size) \ +#define __copy_user(to, from, size, prefix, set, restore) \ do { \ int __d0, __d1, __d2; \ __asm__ __volatile__( \ + set \ " cmp $7,%0\n" \ " jbe 1f\n" \ " movl %1,%0\n" \ " negl %0\n" \ " andl $7,%0\n" \ " subl %0,%3\n" \ - "4: rep; movsb\n" \ + "4: rep; "prefix"movsb\n" \ " movl %3,%0\n" \ " shrl $2,%0\n" \ " andl $3,%3\n" \ " .align 2,0x90\n" \ - "0: rep; movsl\n" \ + "0: rep; "prefix"movsl\n" \ " movl %3,%0\n" \ - "1: rep; movsb\n" \ + "1: rep; "prefix"movsb\n" \ "2:\n" \ + restore \ ".section .fixup,\"ax\"\n" \ "5: addl %3,%0\n" \ " jmp 2b\n" \ @@ -682,14 +799,14 @@ do { \ " negl %0\n" \ " andl $7,%0\n" \ " subl %0,%3\n" \ - "4: rep; movsb\n" \ + "4: rep; "__copyuser_seg"movsb\n" \ " movl %3,%0\n" \ " shrl $2,%0\n" \ " andl $3,%3\n" \ " .align 2,0x90\n" \ - "0: rep; movsl\n" \ + "0: rep; "__copyuser_seg"movsl\n" \ " movl %3,%0\n" \ - "1: rep; movsb\n" \ + "1: rep; "__copyuser_seg"movsb\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "5: addl %3,%0\n" \ @@ -775,9 +892,9 @@ survive: } #endif if (movsl_is_ok(to, from, n)) - __copy_user(to, from, n); + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES); else - n = __copy_user_intel(to, from, n); + n = __generic_copy_to_user_intel(to, from, n); return n; } EXPORT_SYMBOL(__copy_to_user_ll); @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero unsigned long n) { if (movsl_is_ok(to, from, n)) - __copy_user(to, from, n); + __copy_user(to, from, n, __copyuser_seg, "", ""); else - n = __copy_user_intel((void __user *)to, - (const void *)from, n); + n = __generic_copy_from_user_intel(to, from, n); return n; } EXPORT_SYMBOL(__copy_from_user_ll_nozero); @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach if (n > 64 && cpu_has_xmm2) 
n = __copy_user_intel_nocache(to, from, n); else - __copy_user(to, from, n); + __copy_user(to, from, n, __copyuser_seg, "", ""); #else - __copy_user(to, from, n); + __copy_user(to, from, n, __copyuser_seg, "", ""); #endif return n; } EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); -/** - * copy_to_user: - Copy a block of data into user space. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep. - * - * Copy data from kernel space to user space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -unsigned long -copy_to_user(void __user *to, const void *from, unsigned long n) +void copy_from_user_overflow(void) { - if (access_ok(VERIFY_WRITE, to, n)) - n = __copy_to_user(to, from, n); - return n; + WARN(1, "Buffer overflow detected!\n"); } -EXPORT_SYMBOL(copy_to_user); +EXPORT_SYMBOL(copy_from_user_overflow); -/** - * copy_from_user: - Copy a block of data from user space. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep. - * - * Copy data from user space to kernel space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. - */ -unsigned long -_copy_from_user(void *to, const void __user *from, unsigned long n) +void copy_to_user_overflow(void) { - if (access_ok(VERIFY_READ, from, n)) - n = __copy_from_user(to, from, n); - else - memset(to, 0, n); - return n; + WARN(1, "Buffer overflow detected!\n"); } -EXPORT_SYMBOL(_copy_from_user); +EXPORT_SYMBOL(copy_to_user_overflow); -void copy_from_user_overflow(void) +#ifdef CONFIG_PAX_MEMORY_UDEREF +void __set_fs(mm_segment_t x) { - WARN(1, "Buffer overflow detected!\n"); + switch (x.seg) { + case 0: + loadsegment(gs, 0); + break; + case TASK_SIZE_MAX: + loadsegment(gs, __USER_DS); + break; + case -1UL: + loadsegment(gs, __KERNEL_DS); + break; + default: + BUG(); + } + return; } -EXPORT_SYMBOL(copy_from_user_overflow); +EXPORT_SYMBOL(__set_fs); + +void set_fs(mm_segment_t x) +{ + current_thread_info()->addr_limit = x; + __set_fs(x); +} +EXPORT_SYMBOL(set_fs); +#endif diff -urNp linux-2.6.39.1/arch/x86/lib/usercopy_64.c linux-2.6.39.1/arch/x86/lib/usercopy_64.c --- linux-2.6.39.1/arch/x86/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/lib/usercopy_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -42,6 +42,12 @@ long __strncpy_from_user(char *dst, const char __user *src, long count) { long res; + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if ((unsigned long)src < PAX_USER_SHADOW_BASE) + src += PAX_USER_SHADOW_BASE; +#endif + __do_strncpy_from_user(dst, src, count, res); return res; } @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user * { long __d0; might_fault(); + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if ((unsigned long)addr < PAX_USER_SHADOW_BASE) + addr += PAX_USER_SHADOW_BASE; +#endif + /* no memory constraint because it doesn't change any memory gcc knows about */ asm volatile( @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user); unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len) { - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, 
from, len)) { + +#ifdef CONFIG_PAX_MEMORY_UDEREF + if ((unsigned long)to < PAX_USER_SHADOW_BASE) + to += PAX_USER_SHADOW_BASE; + if ((unsigned long)from < PAX_USER_SHADOW_BASE) + from += PAX_USER_SHADOW_BASE; +#endif + return copy_user_generic((__force void *)to, (__force void *)from, len); - } - return len; + } + return len; } EXPORT_SYMBOL(copy_in_user); diff -urNp linux-2.6.39.1/arch/x86/Makefile linux-2.6.39.1/arch/x86/Makefile --- linux-2.6.39.1/arch/x86/Makefile 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/Makefile 2011-05-22 19:36:30.000000000 -0400 @@ -195,3 +195,12 @@ define archhelp echo ' FDARGS="..." arguments for the booted kernel' echo ' FDINITRD=file initrd for the booted kernel' endef + +define OLD_LD + +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils. +*** Please upgrade your binutils to 2.18 or newer +endef + +archprepare: + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD))) diff -urNp linux-2.6.39.1/arch/x86/mm/extable.c linux-2.6.39.1/arch/x86/mm/extable.c --- linux-2.6.39.1/arch/x86/mm/extable.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/extable.c 2011-05-22 19:36:30.000000000 -0400 @@ -1,14 +1,71 @@ #include #include +#include #include +#include +/* + * The exception table needs to be sorted so that the binary + * search that we use to find entries in it works properly. + * This is used both for the kernel exception table and for + * the exception tables of modules that get loaded. + */ +static int cmp_ex(const void *a, const void *b) +{ + const struct exception_table_entry *x = a, *y = b; + + /* avoid overflow */ + if (x->insn > y->insn) + return 1; + if (x->insn < y->insn) + return -1; + return 0; +} + +static void swap_ex(void *a, void *b, int size) +{ + struct exception_table_entry t, *x = a, *y = b; + + t = *x; + + pax_open_kernel(); + *x = *y; + *y = t; + pax_close_kernel(); +} + +void sort_extable(struct exception_table_entry *start, + struct exception_table_entry *finish) +{ + sort(start, finish - start, sizeof(struct exception_table_entry), + cmp_ex, swap_ex); +} + +#ifdef CONFIG_MODULES +/* + * If the exception table is sorted, any referring to the module init + * will be at the beginning or the end. + */ +void trim_init_extable(struct module *m) +{ + /*trim the beginning*/ + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) { + m->extable++; + m->num_exentries--; + } + /*trim the end*/ + while (m->num_exentries && + within_module_init(m->extable[m->num_exentries-1].insn, m)) + m->num_exentries--; +} +#endif /* CONFIG_MODULES */ int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *fixup; #ifdef CONFIG_PNPBIOS - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) { + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) { extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; extern u32 pnp_bios_is_utter_crap; pnp_bios_is_utter_crap = 1; diff -urNp linux-2.6.39.1/arch/x86/mm/fault.c linux-2.6.39.1/arch/x86/mm/fault.c --- linux-2.6.39.1/arch/x86/mm/fault.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/fault.c 2011-06-06 17:34:04.000000000 -0400 @@ -12,10 +12,18 @@ #include /* kmmio_handler, ... */ #include /* perf_sw_event */ #include /* hstate_index_to_shift */ +#include +#include #include /* dotraplinkage, ... */ #include /* pgd_*(), ... */ #include /* kmemcheck_*(), ... 
*/ +#include +#include + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +#include +#endif /* * Page fault error code bits: @@ -53,7 +61,7 @@ static inline int __kprobes notify_page_ int ret = 0; /* kprobe_running() needs smp_processor_id() */ - if (kprobes_built_in() && !user_mode_vm(regs)) { + if (kprobes_built_in() && !user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, 14)) ret = 1; @@ -114,7 +122,10 @@ check_prefetch_opcode(struct pt_regs *re return !instr_lo || (instr_lo>>1) == 1; case 0x00: /* Prefetch instruction is 0x0F0D or 0x0F18 */ - if (probe_kernel_address(instr, opcode)) + if (user_mode(regs)) { + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1)) + return 0; + } else if (probe_kernel_address(instr, opcode)) return 0; *prefetch = (instr_lo == 0xF) && @@ -148,7 +159,10 @@ is_prefetch(struct pt_regs *regs, unsign while (instr < max_instr) { unsigned char opcode; - if (probe_kernel_address(instr, opcode)) + if (user_mode(regs)) { + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1)) + break; + } else if (probe_kernel_address(instr, opcode)) break; instr++; @@ -179,6 +193,30 @@ force_sig_info_fault(int si_signo, int s force_sig_info(si_signo, &info, tsk); } +#ifdef CONFIG_PAX_EMUTRAMP +static int pax_handle_fetch_fault(struct pt_regs *regs); +#endif + +#ifdef CONFIG_PAX_PAGEEXEC +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address) +{ + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset(mm, address); + if (!pgd_present(*pgd)) + return NULL; + pud = pud_offset(pgd, address); + if (!pud_present(*pud)) + return NULL; + pmd = pmd_offset(pud, address); + if (!pmd_present(*pmd)) + return NULL; + return pmd; +} +#endif + DEFINE_SPINLOCK(pgd_lock); LIST_HEAD(pgd_list); @@ -229,10 +267,22 @@ void vmalloc_sync_all(void) for (address = VMALLOC_START & PMD_MASK; address >= TASK_SIZE && address < FIXADDR_TOP; address += PMD_SIZE) { + +#ifdef CONFIG_PAX_PER_CPU_PGD + unsigned long cpu; +#else struct page *page; +#endif spin_lock(&pgd_lock); + +#ifdef CONFIG_PAX_PER_CPU_PGD + for (cpu = 0; cpu < NR_CPUS; ++cpu) { + pgd_t *pgd = get_cpu_pgd(cpu); + pmd_t *ret; +#else list_for_each_entry(page, &pgd_list, lru) { + pgd_t *pgd = page_address(page); spinlock_t *pgt_lock; pmd_t *ret; @@ -240,8 +290,13 @@ void vmalloc_sync_all(void) pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); - ret = vmalloc_sync_one(page_address(page), address); +#endif + + ret = vmalloc_sync_one(pgd, address); + +#ifndef CONFIG_PAX_PER_CPU_PGD spin_unlock(pgt_lock); +#endif if (!ret) break; @@ -275,6 +330,11 @@ static noinline __kprobes int vmalloc_fa * an interrupt in the middle of a task switch.. */ pgd_paddr = read_cr3(); + +#ifdef CONFIG_PAX_PER_CPU_PGD + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK)); +#endif + pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); if (!pmd_k) return -1; @@ -370,7 +430,14 @@ static noinline __kprobes int vmalloc_fa * happen within a race in page table update. 
In the later * case just flush: */ + +#ifdef CONFIG_PAX_PER_CPU_PGD + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK)); + pgd = pgd_offset_cpu(smp_processor_id(), address); +#else pgd = pgd_offset(current->active_mm, address); +#endif + pgd_ref = pgd_offset_k(address); if (pgd_none(*pgd_ref)) return -1; @@ -532,7 +599,7 @@ static int is_errata93(struct pt_regs *r static int is_errata100(struct pt_regs *regs, unsigned long address) { #ifdef CONFIG_X86_64 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32)) return 1; #endif return 0; @@ -559,7 +626,7 @@ static int is_f00f_bug(struct pt_regs *r } static const char nx_warning[] = KERN_CRIT -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n"; static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, @@ -568,15 +635,26 @@ show_fault_oops(struct pt_regs *regs, un if (!oops_may_print()) return; - if (error_code & PF_INSTR) { + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) { unsigned int level; pte_t *pte = lookup_address(address, &level); if (pte && pte_present(*pte) && !pte_exec(*pte)) - printk(nx_warning, current_uid()); + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current)); } +#ifdef CONFIG_PAX_KERNEXEC + if (init_mm.start_code <= address && address < init_mm.end_code) { + if (current->signal->curr_ip) + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid()); + else + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", + current->comm, task_pid_nr(current), current_uid(), current_euid()); + } +#endif + printk(KERN_ALERT "BUG: unable to handle kernel "); if (address < PAGE_SIZE) printk(KERN_CONT "NULL pointer dereference"); @@ -701,6 +779,68 @@ __bad_area_nosemaphore(struct pt_regs *r unsigned long address, int si_code) { struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + +#ifdef CONFIG_X86_64 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) { + if (regs->ip == (unsigned long)vgettimeofday) { + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday); + return; + } else if (regs->ip == (unsigned long)vtime) { + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time); + return; + } else if (regs->ip == (unsigned long)vgetcpu) { + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu); + return; + } + } +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + if (mm && (error_code & PF_USER)) { + unsigned long ip = regs->ip; + + if (v8086_mode(regs)) + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff); + + /* + * It's possible to have interrupts off here: + */ + local_irq_enable(); + +#ifdef CONFIG_PAX_PAGEEXEC + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) { + +#ifdef CONFIG_PAX_EMUTRAMP + switch (pax_handle_fetch_fault(regs)) { + case 2: + return; + } +#endif + + pax_report_fault(regs, (void *)ip, (void *)regs->sp); + do_group_exit(SIGKILL); + } +#endif + +#ifdef CONFIG_PAX_SEGMEXEC + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) &&
(ip + SEGMEXEC_TASK_SIZE == address)) { + +#ifdef CONFIG_PAX_EMUTRAMP + switch (pax_handle_fetch_fault(regs)) { + case 2: + return; + } +#endif + + pax_report_fault(regs, (void *)ip, (void *)regs->sp); + do_group_exit(SIGKILL); + } +#endif + + } +#endif /* User mode accesses just cause a SIGSEGV */ if (error_code & PF_USER) { @@ -855,6 +995,99 @@ static int spurious_fault_check(unsigned return 1; } +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code) +{ + pte_t *pte; + pmd_t *pmd; + spinlock_t *ptl; + unsigned char pte_mask; + + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) || + !(mm->pax_flags & MF_PAX_PAGEEXEC)) + return 0; + + /* PaX: it's our fault, let's handle it if we can */ + + /* PaX: take a look at read faults before acquiring any locks */ + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) { + /* instruction fetch attempt from a protected page in user mode */ + up_read(&mm->mmap_sem); + +#ifdef CONFIG_PAX_EMUTRAMP + switch (pax_handle_fetch_fault(regs)) { + case 2: + return 1; + } +#endif + + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp); + do_group_exit(SIGKILL); + } + + pmd = pax_get_pmd(mm, address); + if (unlikely(!pmd)) + return 0; + + pte = pte_offset_map_lock(mm, pmd, address, &ptl); + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) { + pte_unmap_unlock(pte, ptl); + return 0; + } + + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) { + /* write attempt to a protected page in user mode */ + pte_unmap_unlock(pte, ptl); + return 0; + } + +#ifdef CONFIG_SMP + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask))) +#else + if (likely(address > get_limit(regs->cs))) +#endif + { + set_pte(pte, pte_mkread(*pte)); + __flush_tlb_one(address); + pte_unmap_unlock(pte, ptl); + up_read(&mm->mmap_sem); + return 1; + } + + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1)); + + /* + * PaX: fill DTLB with user rights and retry + */ + __asm__ __volatile__ ( + "orb %2,(%1)\n" +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC) +/* + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any* + * page fault when examined during a TLB load attempt. this is true not only + * for PTEs holding a non-present entry but also present entries that will + * raise a page fault (such as those set up by PaX, or the copy-on-write + * mechanism). in effect it means that we do *not* need to flush the TLBs + * for our target pages since their PTEs are simply not in the TLBs at all. + + * the best thing in omitting it is that we gain around 15-20% speed in the + * fast path of the page fault handler and can get rid of tracing since we + * can no longer flush unintended entries. + */ + "invlpg (%0)\n" +#endif + __copyuser_seg"testb $0,(%0)\n" + "xorb %3,(%1)\n" + : + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER) + : "memory", "cc"); + pte_unmap_unlock(pte, ptl); + up_read(&mm->mmap_sem); + return 1; +} +#endif + /* * Handle a spurious fault caused by a stale TLB entry. 
* @@ -927,6 +1160,9 @@ int show_unhandled_signals = 1; static inline int access_error(unsigned long error_code, struct vm_area_struct *vma) { + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC)) + return 1; + if (error_code & PF_WRITE) { /* write, present and write, not present: */ if (unlikely(!(vma->vm_flags & VM_WRITE))) @@ -960,19 +1196,33 @@ do_page_fault(struct pt_regs *regs, unsi { struct vm_area_struct *vma; struct task_struct *tsk; - unsigned long address; struct mm_struct *mm; int fault; int write = error_code & PF_WRITE; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | (write ? FAULT_FLAG_WRITE : 0); + /* Get the faulting address: */ + unsigned long address = read_cr2(); + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) { + if (!search_exception_tables(regs->ip)) { + bad_area_nosemaphore(regs, error_code, address); + return; + } + if (address < PAX_USER_SHADOW_BASE) { + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n"); + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip); + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR); + } else + address -= PAX_USER_SHADOW_BASE; + } +#endif + tsk = current; mm = tsk->mm; - /* Get the faulting address: */ - address = read_cr2(); - /* * Detect and handle instructions that would cause a page fault for * both a tracked kernel page and a userspace page. @@ -1032,7 +1282,7 @@ do_page_fault(struct pt_regs *regs, unsi * User-mode registers count as a user access even for any * potential system fault or CPU buglet: */ - if (user_mode_vm(regs)) { + if (user_mode(regs)) { local_irq_enable(); error_code |= PF_USER; } else { @@ -1087,6 +1337,11 @@ retry: might_sleep(); } +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) + if (pax_handle_pageexec_fault(regs, mm, address, error_code)) + return; +#endif + vma = find_vma(mm, address); if (unlikely(!vma)) { bad_area(regs, error_code, address); @@ -1098,18 +1353,24 @@ retry: bad_area(regs, error_code, address); return; } - if (error_code & PF_USER) { - /* - * Accessing the stack below %sp is always a bug. - * The large cushion allows instructions like enter - * and pusha to work. ("enter $65535, $31" pushes - * 32 pointers and then decrements %sp by 65535.) - */ - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { - bad_area(regs, error_code, address); - return; - } + /* + * Accessing the stack below %sp is always a bug. + * The large cushion allows instructions like enter + * and pusha to work. ("enter $65535, $31" pushes + * 32 pointers and then decrements %sp by 65535.) 
+ */ + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) { + bad_area(regs, error_code, address); + return; } + +#ifdef CONFIG_PAX_SEGMEXEC + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) { + bad_area(regs, error_code, address); + return; + } +#endif + if (unlikely(expand_stack(vma, address))) { bad_area(regs, error_code, address); return; @@ -1164,3 +1425,199 @@ good_area: up_read(&mm->mmap_sem); } + +#ifdef CONFIG_PAX_EMUTRAMP +static int pax_handle_fetch_fault_32(struct pt_regs *regs) +{ + int err; + + do { /* PaX: gcc trampoline emulation #1 */ + unsigned char mov1, mov2; + unsigned short jmp; + unsigned int addr1, addr2; + +#ifdef CONFIG_X86_64 + if ((regs->ip + 11) >> 32) + break; +#endif + + err = get_user(mov1, (unsigned char __user *)regs->ip); + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5)); + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10)); + + if (err) + break; + + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) { + regs->cx = addr1; + regs->ax = addr2; + regs->ip = addr2; + return 2; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #2 */ + unsigned char mov, jmp; + unsigned int addr1, addr2; + +#ifdef CONFIG_X86_64 + if ((regs->ip + 9) >> 32) + break; +#endif + + err = get_user(mov, (unsigned char __user *)regs->ip); + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5)); + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); + + if (err) + break; + + if (mov == 0xB9 && jmp == 0xE9) { + regs->cx = addr1; + regs->ip = (unsigned int)(regs->ip + addr2 + 10); + return 2; + } + } while (0); + + return 1; /* PaX in action */ +} + +#ifdef CONFIG_X86_64 +static int pax_handle_fetch_fault_64(struct pt_regs *regs) +{ + int err; + + do { /* PaX: gcc trampoline emulation #1 */ + unsigned short mov1, mov2, jmp1; + unsigned char jmp2; + unsigned int addr1; + unsigned long addr2; + + err = get_user(mov1, (unsigned short __user *)regs->ip); + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2)); + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6)); + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8)); + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16)); + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18)); + + if (err) + break; + + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { + regs->r11 = addr1; + regs->r10 = addr2; + regs->ip = addr1; + return 2; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #2 */ + unsigned short mov1, mov2, jmp1; + unsigned char jmp2; + unsigned long addr1, addr2; + + err = get_user(mov1, (unsigned short __user *)regs->ip); + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2)); + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10)); + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12)); + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20)); + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22)); + + if (err) + break; + + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { + regs->r11 = addr1; + regs->r10 = addr2; + regs->ip = addr1; + return 2; + } + } while (0); + + return 1; /* PaX in action */ +} +#endif + +/* + * PaX: decide 
what to do with offenders (regs->ip = fault address) + * + * returns 1 when task should be killed + * 2 when gcc trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + if (v8086_mode(regs)) + return 1; + + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) + return 1; + +#ifdef CONFIG_X86_32 + return pax_handle_fetch_fault_32(regs); +#else + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) + return pax_handle_fetch_fault_32(regs); + else + return pax_handle_fetch_fault_64(regs); +#endif +} +#endif + +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) +void pax_report_insns(void *pc, void *sp) +{ + long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 20; i++) { + unsigned char c; + if (get_user(c, (__force unsigned char __user *)pc+i)) + printk(KERN_CONT "?? "); + else + printk(KERN_CONT "%02x ", c); + } + printk("\n"); + + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long)); + for (i = -1; i < 80 / (long)sizeof(long); i++) { + unsigned long c; + if (get_user(c, (__force unsigned long __user *)sp+i)) +#ifdef CONFIG_X86_32 + printk(KERN_CONT "???????? "); +#else + printk(KERN_CONT "???????????????? "); +#endif + else + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c); + } + printk("\n"); +} +#endif + +/** + * probe_kernel_write(): safely attempt to write to a location + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ +long notrace probe_kernel_write(void *dst, const void *src, size_t size) +{ + long ret; + mm_segment_t old_fs = get_fs(); + + set_fs(KERNEL_DS); + pagefault_disable(); + pax_open_kernel(); + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); + pax_close_kernel(); + pagefault_enable(); + set_fs(old_fs); + + return ret ? -EFAULT : 0; +} diff -urNp linux-2.6.39.1/arch/x86/mm/gup.c linux-2.6.39.1/arch/x86/mm/gup.c --- linux-2.6.39.1/arch/x86/mm/gup.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/gup.c 2011-05-22 19:36:30.000000000 -0400 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, + if (unlikely(!__access_ok(write ? 
VERIFY_WRITE : VERIFY_READ, (void __user *)start, len))) return 0; diff -urNp linux-2.6.39.1/arch/x86/mm/highmem_32.c linux-2.6.39.1/arch/x86/mm/highmem_32.c --- linux-2.6.39.1/arch/x86/mm/highmem_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/highmem_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); + + pax_open_kernel(); set_pte(kmap_pte-idx, mk_pte(page, prot)); + pax_close_kernel(); return (void *)vaddr; } diff -urNp linux-2.6.39.1/arch/x86/mm/hugetlbpage.c linux-2.6.39.1/arch/x86/mm/hugetlbpage.c --- linux-2.6.39.1/arch/x86/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/hugetlbpage.c 2011-05-22 19:36:30.000000000 -0400 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; - unsigned long start_addr; + unsigned long start_addr, pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + pax_task_size -= PAGE_SIZE; if (len > mm->cached_hole_size) { - start_addr = mm->free_area_cache; + start_addr = mm->free_area_cache; } else { - start_addr = TASK_UNMAPPED_BASE; - mm->cached_hole_size = 0; + start_addr = mm->mmap_base; + mm->cached_hole_size = 0; } full_search: @@ -280,26 +287,27 @@ full_search: for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ - if (TASK_SIZE - len < addr) { + if (pax_task_size - len < addr) { /* * Start a new search - just in case we missed * some holes. */ - if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = TASK_UNMAPPED_BASE; + if (start_addr != mm->mmap_base) { + start_addr = mm->mmap_base; mm->cached_hole_size = 0; goto full_search; } return -ENOMEM; } - if (!vma || addr + len <= vma->vm_start) { - mm->free_area_cache = addr + len; - return addr; - } + if (check_heap_stack_gap(vma, addr, len)) + break; if (addr + mm->cached_hole_size < vma->vm_start) mm->cached_hole_size = vma->vm_start - addr; addr = ALIGN(vma->vm_end, huge_page_size(h)); } + + mm->free_area_cache = addr + len; + return addr; } static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe { struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; - struct vm_area_struct *vma, *prev_vma; - unsigned long base = mm->mmap_base, addr = addr0; + struct vm_area_struct *vma; + unsigned long base = mm->mmap_base, addr; unsigned long largest_hole = mm->cached_hole_size; - int first_time = 1; /* don't allow allocations above current base */ if (mm->free_area_cache > base) @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe largest_hole = 0; mm->free_area_cache = base; } -try_again: + /* make sure it can fit in the remaining address space */ if (mm->free_area_cache < len) goto fail; /* either no address requested or can't fit in requested address hole */ - addr = (mm->free_area_cache - len) & huge_page_mask(h); + addr = (mm->free_area_cache - len); do { + addr &= huge_page_mask(h); + vma = find_vma(mm, addr); /* * Lookup failure means no vma is above this address, * i.e. 
return with success: - */ - if (!(vma = find_vma_prev(mm, addr, &prev_vma))) - return addr; - - /* * new region fits between prev_vma->vm_end and * vma->vm_start, use it: */ - if (addr + len <= vma->vm_start && - (!prev_vma || (addr >= prev_vma->vm_end))) { + if (check_heap_stack_gap(vma, addr, len)) { /* remember the address as a hint for next time */ - mm->cached_hole_size = largest_hole; - return (mm->free_area_cache = addr); - } else { - /* pull free_area_cache down to the first hole */ - if (mm->free_area_cache == vma->vm_end) { - mm->free_area_cache = vma->vm_start; - mm->cached_hole_size = largest_hole; - } + mm->cached_hole_size = largest_hole; + return (mm->free_area_cache = addr); + } + /* pull free_area_cache down to the first hole */ + if (mm->free_area_cache == vma->vm_end) { + mm->free_area_cache = vma->vm_start; + mm->cached_hole_size = largest_hole; } /* remember the largest hole we saw so far */ if (addr + largest_hole < vma->vm_start) - largest_hole = vma->vm_start - addr; + largest_hole = vma->vm_start - addr; /* try just below the current vma->vm_start */ - addr = (vma->vm_start - len) & huge_page_mask(h); - } while (len <= vma->vm_start); + addr = skip_heap_stack_gap(vma, len); + } while (!IS_ERR_VALUE(addr)); fail: /* - * if hint left us with no space for the requested - * mapping then try again: - */ - if (first_time) { - mm->free_area_cache = base; - largest_hole = 0; - first_time = 0; - goto try_again; - } - /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ - mm->free_area_cache = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; + else +#endif + + mm->mmap_base = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + + mm->free_area_cache = mm->mmap_base; mm->cached_hole_size = ~0UL; addr = hugetlb_get_unmapped_area_bottomup(file, addr0, len, pgoff, flags); @@ -386,6 +392,7 @@ fail: /* * Restore the topdown base: */ + mm->mmap_base = base; mm->free_area_cache = base; mm->cached_hole_size = ~0UL; @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; + unsigned long pax_task_size = TASK_SIZE; if (len & ~huge_page_mask(h)) return -EINVAL; - if (len > TASK_SIZE) + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif + + pax_task_size -= PAGE_SIZE; + + if (len > pax_task_size) return -ENOMEM; if (flags & MAP_FIXED) { @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff -urNp linux-2.6.39.1/arch/x86/mm/init_32.c linux-2.6.39.1/arch/x86/mm/init_32.c --- linux-2.6.39.1/arch/x86/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/init_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void) } /* - * Creates a middle page table and puts a pointer to it in the - * given global directory entry. 
This only returns the gd entry - * in non-PAE compilation mode, since the middle layer is folded. - */ -static pmd_t * __init one_md_table_init(pgd_t *pgd) -{ - pud_t *pud; - pmd_t *pmd_table; - -#ifdef CONFIG_X86_PAE - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { - if (after_bootmem) - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE); - else - pmd_table = (pmd_t *)alloc_low_page(); - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); - pud = pud_offset(pgd, 0); - BUG_ON(pmd_table != pmd_offset(pud, 0)); - - return pmd_table; - } -#endif - pud = pud_offset(pgd, 0); - pmd_table = pmd_offset(pud, 0); - - return pmd_table; -} - -/* * Create a page table and place a pointer to it in a middle page * directory entry: */ @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini page_table = (pte_t *)alloc_low_page(); paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE)); +#else set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); +#endif BUG_ON(page_table != pte_offset_kernel(pmd, 0)); } return pte_offset_kernel(pmd, 0); } +static pmd_t * __init one_md_table_init(pgd_t *pgd) +{ + pud_t *pud; + pmd_t *pmd_table; + + pud = pud_offset(pgd, 0); + pmd_table = pmd_offset(pud, 0); + + return pmd_table; +} + pmd_t * __init populate_extra_pmd(unsigned long vaddr) { int pgd_idx = pgd_index(vaddr); @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star int pgd_idx, pmd_idx; unsigned long vaddr; pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte = NULL; @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star pgd = pgd_base + pgd_idx; for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { - pmd = one_md_table_init(pgd); - pmd = pmd + pmd_index(vaddr); + pud = pud_offset(pgd, vaddr); + pmd = pmd_offset(pud, vaddr); + +#ifdef CONFIG_X86_PAE + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT); +#endif + for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) { pte = page_table_kmap_check(one_page_table_init(pmd), @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star } } -static inline int is_kernel_text(unsigned long addr) +static inline int is_kernel_text(unsigned long start, unsigned long end) { - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) - return 1; - return 0; + if ((start > ktla_ktva((unsigned long)_etext) || + end <= ktla_ktva((unsigned long)_stext)) && + (start > ktla_ktva((unsigned long)_einittext) || + end <= ktla_ktva((unsigned long)_sinittext)) && + +#ifdef CONFIG_ACPI_SLEEP + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) && +#endif + + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000))) + return 0; + return 1; } /* @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo unsigned long last_map_addr = end; unsigned long start_pfn, end_pfn; pgd_t *pgd_base = swapper_pg_dir; - int pgd_idx, pmd_idx, pte_ofs; + unsigned int pgd_idx, pmd_idx, pte_ofs; unsigned long pfn; pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte; unsigned pages_2m, pages_4k; @@ -281,8 +282,13 @@ repeat: pfn = start_pfn; pgd_idx = pgd_index((pfn<> PAGE_SHIFT); +#endif if (pfn >= end_pfn) continue; @@ -294,14 +300,13 @@ repeat: #endif for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; pmd++, pmd_idx++) { - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; + unsigned 
long address = pfn * PAGE_SIZE + PAGE_OFFSET; /* * Map with big pages if possible, otherwise * create normal page tables: */ if (use_pse) { - unsigned int addr2; pgprot_t prot = PAGE_KERNEL_LARGE; /* * first pass will use the same initial @@ -311,11 +316,7 @@ repeat: __pgprot(PTE_IDENT_ATTR | _PAGE_PSE); - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + - PAGE_OFFSET + PAGE_SIZE-1; - - if (is_kernel_text(addr) || - is_kernel_text(addr2)) + if (is_kernel_text(address, address + PMD_SIZE)) prot = PAGE_KERNEL_LARGE_EXEC; pages_2m++; @@ -332,7 +333,7 @@ repeat: pte_ofs = pte_index((pfn<> 10, - (unsigned long)&_etext, (unsigned long)&_edata, - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, + (unsigned long)&_sdata, (unsigned long)&_edata, + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10, - (unsigned long)&_text, (unsigned long)&_etext, + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext), ((unsigned long)&_etext - (unsigned long)&_text) >> 10); /* @@ -893,6 +898,7 @@ void set_kernel_text_rw(void) if (!kernel_set_to_readonly) return; + start = ktla_ktva(start); pr_debug("Set kernel text: %lx - %lx for read write\n", start, start+size); @@ -907,6 +913,7 @@ void set_kernel_text_ro(void) if (!kernel_set_to_readonly) return; + start = ktla_ktva(start); pr_debug("Set kernel text: %lx - %lx for read only\n", start, start+size); @@ -935,6 +942,7 @@ void mark_rodata_ro(void) unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; + start = ktla_ktva(start); set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); printk(KERN_INFO "Write protecting the kernel text: %luk\n", size >> 10); diff -urNp linux-2.6.39.1/arch/x86/mm/init_64.c linux-2.6.39.1/arch/x86/mm/init_64.c --- linux-2.6.39.1/arch/x86/mm/init_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/init_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpa * around without checking the pgd every time. */ -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP; +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP); EXPORT_SYMBOL_GPL(__supported_pte_mask); int force_personality32; @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long star for (address = start; address <= end; address += PGDIR_SIZE) { const pgd_t *pgd_ref = pgd_offset_k(address); + +#ifdef CONFIG_PAX_PER_CPU_PGD + unsigned long cpu; +#else struct page *page; +#endif if (pgd_none(*pgd_ref)) continue; spin_lock(&pgd_lock); + +#ifdef CONFIG_PAX_PER_CPU_PGD + for (cpu = 0; cpu < NR_CPUS; ++cpu) { + pgd_t *pgd = pgd_offset_cpu(cpu, address); +#else list_for_each_entry(page, &pgd_list, lru) { pgd_t *pgd; spinlock_t *pgt_lock; @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long star /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); +#endif if (pgd_none(*pgd)) set_pgd(pgd, *pgd_ref); @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long star BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); +#ifndef CONFIG_PAX_PER_CPU_PGD spin_unlock(pgt_lock); +#endif + } spin_unlock(&pgd_lock); } @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, pmd = fill_pmd(pud, vaddr); pte = fill_pte(pmd, vaddr); + pax_open_kernel(); set_pte(pte, new_pte); + pax_close_kernel(); /* * It's enough to flush this one mapping. 
@@ -261,14 +277,12 @@ static void __init __init_extra_mapping( pgd = pgd_offset_k((unsigned long)__va(phys)); if (pgd_none(*pgd)) { pud = (pud_t *) spp_getpage(); - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE | - _PAGE_USER)); + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE)); } pud = pud_offset(pgd, (unsigned long)__va(phys)); if (pud_none(*pud)) { pmd = (pmd_t *) spp_getpage(); - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | - _PAGE_USER)); + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE)); } pmd = pmd_offset(pud, phys); BUG_ON(!pmd_none(*pmd)); @@ -698,6 +712,12 @@ void __init mem_init(void) pci_iommu_alloc(); +#ifdef CONFIG_PAX_PER_CPU_PGD + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); +#endif + /* clear_bss() already clear the empty_zero_page */ reservedpages = 0; @@ -858,8 +878,8 @@ int kern_addr_valid(unsigned long addr) static struct vm_area_struct gate_vma = { .vm_start = VSYSCALL_START, .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE), - .vm_page_prot = PAGE_READONLY_EXEC, - .vm_flags = VM_READ | VM_EXEC + .vm_page_prot = PAGE_READONLY, + .vm_flags = VM_READ }; struct vm_area_struct *get_gate_vma(struct mm_struct *mm) @@ -893,7 +913,7 @@ int in_gate_area_no_mm(unsigned long add const char *arch_vma_name(struct vm_area_struct *vma) { - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) return "[vdso]"; if (vma == &gate_vma) return "[vsyscall]"; diff -urNp linux-2.6.39.1/arch/x86/mm/init.c linux-2.6.39.1/arch/x86/mm/init.c --- linux-2.6.39.1/arch/x86/mm/init.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/init.c 2011-06-07 19:41:11.000000000 -0400 @@ -33,7 +33,7 @@ int direct_gbpages static void __init find_early_table_space(unsigned long end, int use_pse, int use_gbpages) { - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end; phys_addr_t base; puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; @@ -315,12 +315,34 @@ unsigned long __init_refok init_memory_m */ int devmem_is_allowed(unsigned long pagenr) { - if (pagenr <= 256) +#ifdef CONFIG_GRKERNSEC_KMEM + /* allow BDA */ + if (!pagenr) + return 1; + /* allow EBDA */ + if ((0x9f000 >> PAGE_SHIFT) == pagenr) + return 1; +#else + if (!pagenr) + return 1; +#ifdef CONFIG_VM86 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT)) + return 1; +#endif +#endif + + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT)) return 1; +#ifdef CONFIG_GRKERNSEC_KMEM + /* throw out everything else below 1MB */ + if (pagenr <= 256) + return 0; +#endif if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; if (!page_is_ram(pagenr)) return 1; + return 0; } @@ -375,6 +397,86 @@ void free_init_pages(char *what, unsigne void free_initmem(void) { + +#ifdef CONFIG_PAX_KERNEXEC +#ifdef CONFIG_X86_32 + /* PaX: limit KERNEL_CS to actual size */ + unsigned long addr, limit; + struct desc_struct d; + int cpu; + + limit = paravirt_enabled() ? 
ktva_ktla(0xffffffff) : (unsigned long)&_etext; + limit = (limit - 1UL) >> PAGE_SHIFT; + + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE); + for (cpu = 0; cpu < NR_CPUS; cpu++) { + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S); + } + + /* PaX: make KERNEL_CS read-only */ + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text)); + if (!paravirt_enabled()) + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT); +/* + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) { + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); + } +*/ +#ifdef CONFIG_X86_PAE + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT); +/* + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) { + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); + } +*/ +#endif + +#ifdef CONFIG_MODULES + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT); +#endif + +#else + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + unsigned long addr, end; + + /* PaX: make kernel code/rodata read-only, rest non-executable */ + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) { + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) + continue; + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata) + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); + else + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); + } + + addr = (unsigned long)__va(__pa(__START_KERNEL_map)); + end = addr + KERNEL_IMAGE_SIZE; + for (; addr < end; addr += PMD_SIZE) { + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) + continue; + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata))) + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); + } +#endif + + flush_tlb_all(); +#endif + free_init_pages("unused kernel memory", (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); diff -urNp linux-2.6.39.1/arch/x86/mm/iomap_32.c linux-2.6.39.1/arch/x86/mm/iomap_32.c --- linux-2.6.39.1/arch/x86/mm/iomap_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/iomap_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + + pax_open_kernel(); set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); + pax_close_kernel(); + arch_flush_lazy_mmu_mode(); return (void *)vaddr; diff -urNp linux-2.6.39.1/arch/x86/mm/ioremap.c linux-2.6.39.1/arch/x86/mm/ioremap.c --- linux-2.6.39.1/arch/x86/mm/ioremap.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/ioremap.c 2011-05-22 19:36:30.000000000 -0400 @@ -104,7 +104,7 @@ static void __iomem *__ioremap_caller(re for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) { int is_ram = page_is_ram(pfn); - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) + if (is_ram && 
pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn)))) return NULL; WARN_ON_ONCE(is_ram); } @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se early_param("early_ioremap_debug", early_ioremap_debug_setup); static __initdata int after_paging_init; -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE); static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) { @@ -381,8 +381,7 @@ void __init early_ioremap_init(void) slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); - memset(bm_pte, 0, sizeof(bm_pte)); - pmd_populate_kernel(&init_mm, pmd, bm_pte); + pmd_populate_user(&init_mm, pmd, bm_pte); /* * The boot-ioremap range spans multiple pmds, for which diff -urNp linux-2.6.39.1/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.39.1/arch/x86/mm/kmemcheck/kmemcheck.c --- linux-2.6.39.1/arch/x86/mm/kmemcheck/kmemcheck.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/kmemcheck/kmemcheck.c 2011-05-22 19:36:30.000000000 -0400 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg * memory (e.g. tracked pages)? For now, we need this to avoid * invoking kmemcheck for PnP BIOS calls. */ - if (regs->flags & X86_VM_MASK) + if (v8086_mode(regs)) return false; - if (regs->cs != __KERNEL_CS) + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS) return false; pte = kmemcheck_pte_lookup(address); diff -urNp linux-2.6.39.1/arch/x86/mm/mmap.c linux-2.6.39.1/arch/x86/mm/mmap.c --- linux-2.6.39.1/arch/x86/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/mmap.c 2011-05-22 19:36:30.000000000 -0400 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size * Leave an at least ~128 MB hole with possible stack randomization. 
*/ #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size()) -#define MAX_GAP (TASK_SIZE/6*5) +#define MAX_GAP (pax_task_size/6*5) /* * True on X86_32 or when emulating IA32 on X86_64 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void) return rnd << PAGE_SHIFT; } -static unsigned long mmap_base(void) +static unsigned long mmap_base(struct mm_struct *mm) { unsigned long gap = rlimit(RLIMIT_STACK); + unsigned long pax_task_size = TASK_SIZE; + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + pax_task_size = SEGMEXEC_TASK_SIZE; +#endif if (gap < MIN_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd()); + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd()); } /* * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 * does, but not when emulating X86_32 */ -static unsigned long mmap_legacy_base(void) +static unsigned long mmap_legacy_base(struct mm_struct *mm) { - if (mmap_is_ia32()) + if (mmap_is_ia32()) { + +#ifdef CONFIG_PAX_SEGMEXEC + if (mm->pax_flags & MF_PAX_SEGMEXEC) + return SEGMEXEC_TASK_UNMAPPED_BASE; + else +#endif + return TASK_UNMAPPED_BASE; - else + } else return TASK_UNMAPPED_BASE + mmap_rnd(); } @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo void arch_pick_mmap_layout(struct mm_struct *mm) { if (mmap_is_legacy()) { - mm->mmap_base = mmap_legacy_base(); + mm->mmap_base = mmap_legacy_base(mm); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base += mm->delta_mmap; +#endif + mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { - mm->mmap_base = mmap_base(); + mm->mmap_base = mmap_base(mm); + +#ifdef CONFIG_PAX_RANDMMAP + if (mm->pax_flags & MF_PAX_RANDMMAP) + mm->mmap_base -= mm->delta_mmap + mm->delta_stack; +#endif + mm->get_unmapped_area = arch_get_unmapped_area_topdown; mm->unmap_area = arch_unmap_area_topdown; } diff -urNp linux-2.6.39.1/arch/x86/mm/mmio-mod.c linux-2.6.39.1/arch/x86/mm/mmio-mod.c --- linux-2.6.39.1/arch/x86/mm/mmio-mod.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/mmio-mod.c 2011-05-22 19:36:30.000000000 -0400 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p, static void ioremap_trace_core(resource_size_t offset, unsigned long size, void __iomem *addr) { - static atomic_t next_id; + static atomic_unchecked_t next_id; struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL); /* These are page-unaligned. 
*/ struct mmiotrace_map map = { @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_ .private = trace }, .phys = offset, - .id = atomic_inc_return(&next_id) + .id = atomic_inc_return_unchecked(&next_id) }; map.map_id = trace->id; diff -urNp linux-2.6.39.1/arch/x86/mm/numa_32.c linux-2.6.39.1/arch/x86/mm/numa_32.c --- linux-2.6.39.1/arch/x86/mm/numa_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/numa_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -99,7 +99,6 @@ unsigned long node_memmap_size_bytes(int } #endif -extern unsigned long find_max_low_pfn(void); extern unsigned long highend_pfn, highstart_pfn; #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) diff -urNp linux-2.6.39.1/arch/x86/mm/pageattr.c linux-2.6.39.1/arch/x86/mm/pageattr.c --- linux-2.6.39.1/arch/x86/mm/pageattr.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/pageattr.c 2011-05-22 19:36:30.000000000 -0400 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection */ #ifdef CONFIG_PCI_BIOS if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) - pgprot_val(forbidden) |= _PAGE_NX; + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; #endif /* @@ -269,9 +269,10 @@ static inline pgprot_t static_protection * Does not cover __inittext since that is gone later on. On * 64bit we do not enforce !NX on the low mapping */ - if (within(address, (unsigned long)_text, (unsigned long)_etext)) - pgprot_val(forbidden) |= _PAGE_NX; + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext))) + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; +#ifdef CONFIG_DEBUG_RODATA /* * The .rodata section needs to be read-only. Using the pfn * catches all aliases. @@ -279,6 +280,7 @@ static inline pgprot_t static_protection if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT, __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) pgprot_val(forbidden) |= _PAGE_RW; +#endif #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) /* @@ -317,6 +319,13 @@ static inline pgprot_t static_protection } #endif +#ifdef CONFIG_PAX_KERNEXEC + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) { + pgprot_val(forbidden) |= _PAGE_RW; + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; + } +#endif + prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); return prot; @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address); static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) { /* change init_mm */ + pax_open_kernel(); set_pte_atomic(kpte, pte); + #ifdef CONFIG_X86_32 if (!SHARED_KERNEL_PMD) { + +#ifdef CONFIG_PAX_PER_CPU_PGD + unsigned long cpu; +#else struct page *page; +#endif +#ifdef CONFIG_PAX_PER_CPU_PGD + for (cpu = 0; cpu < NR_CPUS; ++cpu) { + pgd_t *pgd = get_cpu_pgd(cpu); +#else list_for_each_entry(page, &pgd_list, lru) { - pgd_t *pgd; + pgd_t *pgd = (pgd_t *)page_address(page); +#endif + pud_t *pud; pmd_t *pmd; - pgd = (pgd_t *)page_address(page) + pgd_index(address); + pgd += pgd_index(address); pud = pud_offset(pgd, address); pmd = pmd_offset(pud, address); set_pte_atomic((pte_t *)pmd, pte); } } #endif + pax_close_kernel(); } static int diff -urNp linux-2.6.39.1/arch/x86/mm/pageattr-test.c linux-2.6.39.1/arch/x86/mm/pageattr-test.c --- linux-2.6.39.1/arch/x86/mm/pageattr-test.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/pageattr-test.c 2011-05-22 19:36:30.000000000 -0400 @@ -36,7 +36,7 @@ enum { static int pte_testbit(pte_t pte) { - return 
pte_flags(pte) & _PAGE_UNUSED1; + return pte_flags(pte) & _PAGE_CPA_TEST; } struct split_state { diff -urNp linux-2.6.39.1/arch/x86/mm/pat.c linux-2.6.39.1/arch/x86/mm/pat.c --- linux-2.6.39.1/arch/x86/mm/pat.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/pat.c 2011-05-22 19:36:30.000000000 -0400 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end) if (!entry) { printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", - current->comm, current->pid, start, end); + current->comm, task_pid_nr(current), start, end); return -EINVAL; } @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig while (cursor < to) { if (!devmem_is_allowed(pfn)) { printk(KERN_INFO - "Program %s tried to access /dev/mem between %Lx->%Lx.\n", - current->comm, from, to); + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n", + current->comm, from, to, cursor); return 0; } cursor += PAGE_SIZE; @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un printk(KERN_INFO "%s:%d ioremap_change_attr failed %s " "for %Lx-%Lx\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), cattr_name(flags), base, (unsigned long long)(base + size)); return -EINVAL; @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, if (want_flags != flags) { printk(KERN_WARNING "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), cattr_name(want_flags), (unsigned long long)paddr, (unsigned long long)(paddr + size), @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, free_memtype(paddr, paddr + size); printk(KERN_ERR "%s:%d map pfn expected mapping type %s" " for %Lx-%Lx, got %s\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), cattr_name(want_flags), (unsigned long long)paddr, (unsigned long long)(paddr + size), diff -urNp linux-2.6.39.1/arch/x86/mm/pgtable_32.c linux-2.6.39.1/arch/x86/mm/pgtable_32.c --- linux-2.6.39.1/arch/x86/mm/pgtable_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/pgtable_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, return; } pte = pte_offset_kernel(pmd, vaddr); + + pax_open_kernel(); if (pte_val(pteval)) set_pte_at(&init_mm, vaddr, pte, pteval); else pte_clear(&init_mm, vaddr, pte); + pax_close_kernel(); /* * It's enough to flush this one mapping. diff -urNp linux-2.6.39.1/arch/x86/mm/pgtable.c linux-2.6.39.1/arch/x86/mm/pgtable.c --- linux-2.6.39.1/arch/x86/mm/pgtable.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/pgtable.c 2011-05-22 19:36:30.000000000 -0400 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p list_del(&page->lru); } -#define UNSHARED_PTRS_PER_PGD \ - (SHARED_KERNEL_PMD ? 
KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT; +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) +{ + while (count--) + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER); +} +#endif + +#ifdef CONFIG_PAX_PER_CPU_PGD +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count) +{ + while (count--) + +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask); +#else + *dst++ = *src++; +#endif +} +#endif + +#ifdef CONFIG_X86_64 +#define pxd_t pud_t +#define pyd_t pgd_t +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn) +#define pxd_free(mm, pud) pud_free((mm), (pud)) +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud)) +#define pyd_offset(mm ,address) pgd_offset((mm), (address)) +#define PYD_SIZE PGDIR_SIZE +#else +#define pxd_t pmd_t +#define pyd_t pud_t +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn) +#define pxd_free(mm, pud) pmd_free((mm), (pud)) +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud)) +#define pyd_offset(mm ,address) pud_offset((mm), (address)) +#define PYD_SIZE PUD_SIZE +#endif + +#ifdef CONFIG_PAX_PER_CPU_PGD +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {} +static inline void pgd_dtor(pgd_t *pgd) {} +#else static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) { BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm)); @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd) pgd_list_del(pgd); spin_unlock(&pgd_lock); } +#endif /* * List of all pgd's needed for non-PAE so it can invalidate entries @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd) * -- wli */ -#ifdef CONFIG_X86_PAE +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) /* * In PAE mode, we need to do a cr3 reload (=tlb flush) when * updating the top-level pagetable entries to guarantee the @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd) * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate * and initialize the kernel pmds here. */ -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) { @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm, */ flush_tlb_mm(mm); } +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD) +#define PREALLOCATED_PXDS USER_PGD_PTRS #else /* !CONFIG_X86_PAE */ /* No need to prepopulate any pagetable entries in non-PAE modes. 
*/ -#define PREALLOCATED_PMDS 0 +#define PREALLOCATED_PXDS 0 #endif /* CONFIG_X86_PAE */ -static void free_pmds(pmd_t *pmds[]) +static void free_pxds(pxd_t *pxds[]) { int i; - for(i = 0; i < PREALLOCATED_PMDS; i++) - if (pmds[i]) - free_page((unsigned long)pmds[i]); + for(i = 0; i < PREALLOCATED_PXDS; i++) + if (pxds[i]) + free_page((unsigned long)pxds[i]); } -static int preallocate_pmds(pmd_t *pmds[]) +static int preallocate_pxds(pxd_t *pxds[]) { int i; bool failed = false; - for(i = 0; i < PREALLOCATED_PMDS; i++) { - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP); - if (pmd == NULL) + for(i = 0; i < PREALLOCATED_PXDS; i++) { + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP); + if (pxd == NULL) failed = true; - pmds[i] = pmd; + pxds[i] = pxd; } if (failed) { - free_pmds(pmds); + free_pxds(pxds); return -ENOMEM; } @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[ * preallocate which never got a corresponding vma will need to be * freed manually. */ -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp) { int i; - for(i = 0; i < PREALLOCATED_PMDS; i++) { + for(i = 0; i < PREALLOCATED_PXDS; i++) { pgd_t pgd = pgdp[i]; if (pgd_val(pgd) != 0) { - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd); - pgdp[i] = native_make_pgd(0); + set_pgd(pgdp + i, native_make_pgd(0)); - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); - pmd_free(mm, pmd); + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT); + pxd_free(mm, pxd); } } } -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[]) { - pud_t *pud; + pyd_t *pyd; unsigned long addr; int i; - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */ return; - pud = pud_offset(pgd, 0); +#ifdef CONFIG_X86_64 + pyd = pyd_offset(mm, 0L); +#else + pyd = pyd_offset(pgd, 0L); +#endif - for (addr = i = 0; i < PREALLOCATED_PMDS; - i++, pud++, addr += PUD_SIZE) { - pmd_t *pmd = pmds[i]; + for (addr = i = 0; i < PREALLOCATED_PXDS; + i++, pyd++, addr += PYD_SIZE) { + pxd_t *pxd = pxds[i]; if (i >= KERNEL_PGD_BOUNDARY) - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), - sizeof(pmd_t) * PTRS_PER_PMD); + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]), + sizeof(pxd_t) * PTRS_PER_PMD); - pud_populate(mm, pud, pmd); + pyd_populate(mm, pyd, pxd); } } pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *pgd; - pmd_t *pmds[PREALLOCATED_PMDS]; + pxd_t *pxds[PREALLOCATED_PXDS]; pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm) mm->pgd = pgd; - if (preallocate_pmds(pmds) != 0) + if (preallocate_pxds(pxds) != 0) goto out_free_pgd; if (paravirt_pgd_alloc(mm) != 0) - goto out_free_pmds; + goto out_free_pxds; /* * Make sure that pre-populating the pmds is atomic with @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm) spin_lock(&pgd_lock); pgd_ctor(mm, pgd); - pgd_prepopulate_pmd(mm, pgd, pmds); + pgd_prepopulate_pxd(mm, pgd, pxds); spin_unlock(&pgd_lock); return pgd; -out_free_pmds: - free_pmds(pmds); +out_free_pxds: + free_pxds(pxds); out_free_pgd: free_page((unsigned long)pgd); out: @@ -295,7 +344,7 @@ out: void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - pgd_mop_up_pmds(mm, pgd); + pgd_mop_up_pxds(mm, pgd); pgd_dtor(pgd); paravirt_pgd_free(mm, pgd); free_page((unsigned long)pgd); diff -urNp 
linux-2.6.39.1/arch/x86/mm/setup_nx.c linux-2.6.39.1/arch/x86/mm/setup_nx.c --- linux-2.6.39.1/arch/x86/mm/setup_nx.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/setup_nx.c 2011-05-22 19:36:30.000000000 -0400 @@ -5,8 +5,10 @@ #include #include +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) static int disable_nx __cpuinitdata; +#ifndef CONFIG_PAX_PAGEEXEC /* * noexec = on|off * @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str return 0; } early_param("noexec", noexec_setup); +#endif + +#endif void __cpuinit x86_configure_nx(void) { +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) if (cpu_has_nx && !disable_nx) __supported_pte_mask |= _PAGE_NX; else +#endif __supported_pte_mask &= ~_PAGE_NX; } diff -urNp linux-2.6.39.1/arch/x86/mm/tlb.c linux-2.6.39.1/arch/x86/mm/tlb.c --- linux-2.6.39.1/arch/x86/mm/tlb.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/mm/tlb.c 2011-05-22 19:36:30.000000000 -0400 @@ -65,7 +65,11 @@ void leave_mm(int cpu) BUG(); cpumask_clear_cpu(cpu, mm_cpumask(percpu_read(cpu_tlbstate.active_mm))); + +#ifndef CONFIG_PAX_PER_CPU_PGD load_cr3(swapper_pg_dir); +#endif + } EXPORT_SYMBOL_GPL(leave_mm); diff -urNp linux-2.6.39.1/arch/x86/oprofile/backtrace.c linux-2.6.39.1/arch/x86/oprofile/backtrace.c --- linux-2.6.39.1/arch/x86/oprofile/backtrace.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/oprofile/backtrace.c 2011-05-22 19:36:30.000000000 -0400 @@ -57,7 +57,7 @@ dump_user_backtrace_32(struct stack_fram struct stack_frame_ia32 *fp; /* Also check accessibility of one struct frame_head beyond */ - if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead))) return NULL; if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) return NULL; @@ -123,7 +123,7 @@ x86_backtrace(struct pt_regs * const reg { struct stack_frame *head = (struct stack_frame *)frame_pointer(regs); - if (!user_mode_vm(regs)) { + if (!user_mode(regs)) { unsigned long stack = kernel_stack_pointer(regs); if (depth) dump_trace(NULL, regs, (unsigned long *)stack, 0, diff -urNp linux-2.6.39.1/arch/x86/pci/ce4100.c linux-2.6.39.1/arch/x86/pci/ce4100.c --- linux-2.6.39.1/arch/x86/pci/ce4100.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/pci/ce4100.c 2011-05-22 19:36:30.000000000 -0400 @@ -302,7 +302,7 @@ static int ce4100_conf_write(unsigned in return pci_direct_conf1.write(seg, bus, devfn, reg, len, value); } -struct pci_raw_ops ce4100_pci_conf = { +const struct pci_raw_ops ce4100_pci_conf = { .read = ce4100_conf_read, .write = ce4100_conf_write, }; diff -urNp linux-2.6.39.1/arch/x86/pci/common.c linux-2.6.39.1/arch/x86/pci/common.c --- linux-2.6.39.1/arch/x86/pci/common.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/pci/common.c 2011-05-22 19:36:30.000000000 -0400 @@ -33,8 +33,8 @@ int noioapicreroute = 1; int pcibios_last_bus = -1; unsigned long pirq_table_addr; struct pci_bus *pci_root_bus; -struct pci_raw_ops *raw_pci_ops; -struct pci_raw_ops *raw_pci_ext_ops; +const struct pci_raw_ops *raw_pci_ops; +const struct pci_raw_ops *raw_pci_ext_ops; int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 *val) diff -urNp linux-2.6.39.1/arch/x86/pci/direct.c linux-2.6.39.1/arch/x86/pci/direct.c --- linux-2.6.39.1/arch/x86/pci/direct.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/pci/direct.c 2011-05-22 19:36:30.000000000 -0400 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int 
#undef PCI_CONF1_ADDRESS -struct pci_raw_ops pci_direct_conf1 = { +const struct pci_raw_ops pci_direct_conf1 = { .read = pci_conf1_read, .write = pci_conf1_write, }; @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int #undef PCI_CONF2_ADDRESS -struct pci_raw_ops pci_direct_conf2 = { +const struct pci_raw_ops pci_direct_conf2 = { .read = pci_conf2_read, .write = pci_conf2_write, }; @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = { * This should be close to trivial, but it isn't, because there are buggy * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. */ -static int __init pci_sanity_check(struct pci_raw_ops *o) +static int __init pci_sanity_check(const struct pci_raw_ops *o) { u32 x = 0; int year, devfn; diff -urNp linux-2.6.39.1/arch/x86/pci/fixup.c linux-2.6.39.1/arch/x86/pci/fixup.c --- linux-2.6.39.1/arch/x86/pci/fixup.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/pci/fixup.c 2011-05-22 19:36:30.000000000 -0400 @@ -435,7 +435,7 @@ static const struct dmi_system_id __devi DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"), }, }, - { } + {} }; static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev) diff -urNp linux-2.6.39.1/arch/x86/pci/mmconfig_32.c linux-2.6.39.1/arch/x86/pci/mmconfig_32.c --- linux-2.6.39.1/arch/x86/pci/mmconfig_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/pci/mmconfig_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -117,7 +117,7 @@ static int pci_mmcfg_write(unsigned int return 0; } -static struct pci_raw_ops pci_mmcfg = { +static const struct pci_raw_ops pci_mmcfg = { .read = pci_mmcfg_read, .write = pci_mmcfg_write, }; diff -urNp linux-2.6.39.1/arch/x86/pci/mmconfig_64.c linux-2.6.39.1/arch/x86/pci/mmconfig_64.c --- linux-2.6.39.1/arch/x86/pci/mmconfig_64.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/pci/mmconfig_64.c 2011-05-22 19:36:30.000000000 -0400 @@ -81,7 +81,7 @@ static int pci_mmcfg_write(unsigned int return 0; } -static struct pci_raw_ops pci_mmcfg = { +static const struct pci_raw_ops pci_mmcfg = { .read = pci_mmcfg_read, .write = pci_mmcfg_write, }; diff -urNp linux-2.6.39.1/arch/x86/pci/mrst.c linux-2.6.39.1/arch/x86/pci/mrst.c --- linux-2.6.39.1/arch/x86/pci/mrst.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/pci/mrst.c 2011-05-22 19:36:30.000000000 -0400 @@ -218,7 +218,7 @@ static int mrst_pci_irq_enable(struct pc return 0; } -struct pci_ops pci_mrst_ops = { +const struct pci_ops pci_mrst_ops = { .read = pci_read, .write = pci_write, }; diff -urNp linux-2.6.39.1/arch/x86/pci/numaq_32.c linux-2.6.39.1/arch/x86/pci/numaq_32.c --- linux-2.6.39.1/arch/x86/pci/numaq_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/pci/numaq_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -108,7 +108,7 @@ static int pci_conf1_mq_write(unsigned i #undef PCI_CONF1_MQ_ADDRESS -static struct pci_raw_ops pci_direct_conf1_mq = { +static const struct pci_raw_ops pci_direct_conf1_mq = { .read = pci_conf1_mq_read, .write = pci_conf1_mq_write }; diff -urNp linux-2.6.39.1/arch/x86/pci/olpc.c linux-2.6.39.1/arch/x86/pci/olpc.c --- linux-2.6.39.1/arch/x86/pci/olpc.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/pci/olpc.c 2011-05-22 19:36:30.000000000 -0400 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s return 0; } -static struct pci_raw_ops pci_olpc_conf = { +static const struct pci_raw_ops pci_olpc_conf = { .read = pci_olpc_read, .write = pci_olpc_write, }; diff -urNp linux-2.6.39.1/arch/x86/pci/pcbios.c 
linux-2.6.39.1/arch/x86/pci/pcbios.c --- linux-2.6.39.1/arch/x86/pci/pcbios.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/pci/pcbios.c 2011-05-22 19:36:30.000000000 -0400 @@ -79,50 +79,93 @@ union bios32 { static struct { unsigned long address; unsigned short segment; -} bios32_indirect = { 0, __KERNEL_CS }; +} bios32_indirect __read_only = { 0, __PCIBIOS_CS }; /* * Returns the entry point for the given service, NULL on error */ -static unsigned long bios32_service(unsigned long service) +static unsigned long __devinit bios32_service(unsigned long service) { unsigned char return_code; /* %al */ unsigned long address; /* %ebx */ unsigned long length; /* %ecx */ unsigned long entry; /* %edx */ unsigned long flags; + struct desc_struct d, *gdt; local_irq_save(flags); - __asm__("lcall *(%%edi); cld" + + gdt = get_cpu_gdt_table(smp_processor_id()); + + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC); + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC); + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); + + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld" : "=a" (return_code), "=b" (address), "=c" (length), "=d" (entry) : "0" (service), "1" (0), - "D" (&bios32_indirect)); + "D" (&bios32_indirect), + "r"(__PCIBIOS_DS) + : "memory"); + + pax_open_kernel(); + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0; + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0; + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0; + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0; + pax_close_kernel(); + local_irq_restore(flags); switch (return_code) { - case 0: - return address + entry; - case 0x80: /* Not present */ - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); - return 0; - default: /* Shouldn't happen */ - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", - service, return_code); + case 0: { + int cpu; + unsigned char flags; + + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry); + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) { + printk(KERN_WARNING "bios32_service: not valid\n"); return 0; + } + address = address + PAGE_OFFSET; + length += 16UL; /* some BIOSs underreport this... 
*/ + flags = 4; + if (length >= 64*1024*1024) { + length >>= PAGE_SHIFT; + flags |= 8; + } + + for (cpu = 0; cpu < NR_CPUS; cpu++) { + gdt = get_cpu_gdt_table(cpu); + pack_descriptor(&d, address, length, 0x9b, flags); + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); + pack_descriptor(&d, address, length, 0x93, flags); + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); + } + return entry; + } + case 0x80: /* Not present */ + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); + return 0; + default: /* Shouldn't happen */ + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", + service, return_code); + return 0; } } static struct { unsigned long address; unsigned short segment; -} pci_indirect = { 0, __KERNEL_CS }; +} pci_indirect __read_only = { 0, __PCIBIOS_CS }; -static int pci_bios_present; +static int pci_bios_present __read_only; static int __devinit check_pcibios(void) { @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void) unsigned long flags, pcibios_entry; if ((pcibios_entry = bios32_service(PCI_SERVICE))) { - pci_indirect.address = pcibios_entry + PAGE_OFFSET; + pci_indirect.address = pcibios_entry; local_irq_save(flags); - __asm__( - "lcall *(%%edi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%edi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void) "=b" (ebx), "=c" (ecx) : "1" (PCIBIOS_PCI_BIOS_PRESENT), - "D" (&pci_indirect) + "D" (&pci_indirect), + "r" (__PCIBIOS_DS) : "memory"); local_irq_restore(flags); @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se switch (len) { case 1: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se : "1" (PCIBIOS_READ_CONFIG_BYTE), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); /* * Zero-extend the result beyond 8 bits, do not trust the * BIOS having done it: @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se *value &= 0xff; break; case 2: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se : "1" (PCIBIOS_READ_CONFIG_WORD), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); /* * Zero-extend the result beyond 16 bits, do not trust the * BIOS having done it: @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se *value &= 0xffff; break; case 4: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se : "1" (PCIBIOS_READ_CONFIG_DWORD), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; } @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s switch (len) { case 1: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s "c" (value), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" 
(&pci_indirect), + "r" (__PCIBIOS_DS)); break; case 2: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s "c" (value), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; case 4: - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w6, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n\t" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s "c" (value), "b" (bx), "D" ((long)reg), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); break; } @@ -301,7 +371,7 @@ static int pci_bios_write(unsigned int s * Function table for BIOS32 access */ -static struct pci_raw_ops pci_bios_access = { +static const struct pci_raw_ops pci_bios_access = { .read = pci_bios_read, .write = pci_bios_write }; @@ -310,7 +380,7 @@ static struct pci_raw_ops pci_bios_acces * Try to find PCI BIOS. */ -static struct pci_raw_ops * __devinit pci_find_bios(void) +static const struct pci_raw_ops * __devinit pci_find_bios(void) { union bios32 *check; unsigned char sum; @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i DBG("PCI: Fetching IRQ routing table... "); __asm__("push %%es\n\t" + "movw %w8, %%ds\n\t" "push %%ds\n\t" "pop %%es\n\t" - "lcall *(%%esi); cld\n\t" + "lcall *%%ss:(%%esi); cld\n\t" "pop %%es\n\t" + "push %%ss\n\t" + "pop %%ds\n" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i "1" (0), "D" ((long) &opt), "S" (&pci_indirect), - "m" (opt) + "m" (opt), + "r" (__PCIBIOS_DS) : "memory"); DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map); if (ret & 0xff00) @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d { int ret; - __asm__("lcall *(%%esi); cld\n\t" + __asm__("movw %w5, %%ds\n\t" + "lcall *%%ss:(%%esi); cld\n\t" + "push %%ss\n\t" + "pop %%ds\n" "jc 1f\n\t" "xor %%ah, %%ah\n" "1:" @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d : "0" (PCIBIOS_SET_PCI_HW_INT), "b" ((dev->bus->number << 8) | dev->devfn), "c" ((irq << 8) | (pin + 10)), - "S" (&pci_indirect)); + "S" (&pci_indirect), + "r" (__PCIBIOS_DS)); return !(ret & 0xff00); } EXPORT_SYMBOL(pcibios_set_irq_routing); diff -urNp linux-2.6.39.1/arch/x86/pci/xen.c linux-2.6.39.1/arch/x86/pci/xen.c --- linux-2.6.39.1/arch/x86/pci/xen.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/pci/xen.c 2011-05-22 19:36:30.000000000 -0400 @@ -62,7 +62,7 @@ static int acpi_register_gsi_xen_hvm(str #include #include -struct xen_pci_frontend_ops *xen_pci_frontend; +const struct xen_pci_frontend_ops *xen_pci_frontend; EXPORT_SYMBOL_GPL(xen_pci_frontend); #define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \ diff -urNp linux-2.6.39.1/arch/x86/platform/efi/efi_32.c linux-2.6.39.1/arch/x86/platform/efi/efi_32.c --- linux-2.6.39.1/arch/x86/platform/efi/efi_32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/platform/efi/efi_32.c 2011-05-22 19:36:30.000000000 -0400 @@ -38,70 +38,37 @@ */ static unsigned long efi_rt_eflags; -static pgd_t efi_bak_pg_dir_pointer[2]; +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS]; -void efi_call_phys_prelog(void) +void __init efi_call_phys_prelog(void) { - unsigned long cr4; - unsigned long temp; struct desc_ptr gdt_descr; local_irq_save(efi_rt_eflags); - /* - * If I don't have PAE, I should just duplicate 
two entries in page - * directory. If I have PAE, I just need to duplicate one entry in - * page directory. - */ - cr4 = read_cr4_safe(); - - if (cr4 & X86_CR4_PAE) { - efi_bak_pg_dir_pointer[0].pgd = - swapper_pg_dir[pgd_index(0)].pgd; - swapper_pg_dir[0].pgd = - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd; - } else { - efi_bak_pg_dir_pointer[0].pgd = - swapper_pg_dir[pgd_index(0)].pgd; - efi_bak_pg_dir_pointer[1].pgd = - swapper_pg_dir[pgd_index(0x400000)].pgd; - swapper_pg_dir[pgd_index(0)].pgd = - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd; - temp = PAGE_OFFSET + 0x400000; - swapper_pg_dir[pgd_index(0x400000)].pgd = - swapper_pg_dir[pgd_index(temp)].pgd; - } + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS); + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); /* * After the lock is released, the original page table is restored. */ __flush_tlb_all(); - gdt_descr.address = __pa(get_cpu_gdt_table(0)); + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0)); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); } -void efi_call_phys_epilog(void) +void __init efi_call_phys_epilog(void) { - unsigned long cr4; struct desc_ptr gdt_descr; - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); + gdt_descr.address = get_cpu_gdt_table(0); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); - cr4 = read_cr4_safe(); - - if (cr4 & X86_CR4_PAE) { - swapper_pg_dir[pgd_index(0)].pgd = - efi_bak_pg_dir_pointer[0].pgd; - } else { - swapper_pg_dir[pgd_index(0)].pgd = - efi_bak_pg_dir_pointer[0].pgd; - swapper_pg_dir[pgd_index(0x400000)].pgd = - efi_bak_pg_dir_pointer[1].pgd; - } + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS); /* * After the lock is released, the original page table is restored. diff -urNp linux-2.6.39.1/arch/x86/platform/efi/efi_stub_32.S linux-2.6.39.1/arch/x86/platform/efi/efi_stub_32.S --- linux-2.6.39.1/arch/x86/platform/efi/efi_stub_32.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/platform/efi/efi_stub_32.S 2011-05-22 19:36:30.000000000 -0400 @@ -6,6 +6,7 @@ */ #include +#include #include /* @@ -20,7 +21,7 @@ * service functions will comply with gcc calling convention, too. */ -.text +__INIT ENTRY(efi_call_phys) /* * 0. The function can only be called in Linux kernel. So CS has been @@ -36,9 +37,7 @@ ENTRY(efi_call_phys) * The mapping of lower virtual memory has been created in prelog and * epilog. */ - movl $1f, %edx - subl $__PAGE_OFFSET, %edx - jmp *%edx + jmp 1f-__PAGE_OFFSET 1: /* @@ -47,14 +46,8 @@ ENTRY(efi_call_phys) * parameter 2, ..., param n. To make things easy, we save the return * address of efi_call_phys in a global variable. */ - popl %edx - movl %edx, saved_return_addr - /* get the function pointer into ECX*/ - popl %ecx - movl %ecx, efi_rt_function_ptr - movl $2f, %edx - subl $__PAGE_OFFSET, %edx - pushl %edx + popl (saved_return_addr) + popl (efi_rt_function_ptr) /* * 3. Clear PG bit in %CR0. @@ -73,9 +66,8 @@ ENTRY(efi_call_phys) /* * 5. Call the physical function. */ - jmp *%ecx + call *(efi_rt_function_ptr-__PAGE_OFFSET) -2: /* * 6. After EFI runtime service returns, control will return to * following instruction. We'd better readjust stack pointer first. @@ -88,35 +80,28 @@ ENTRY(efi_call_phys) movl %cr0, %edx orl $0x80000000, %edx movl %edx, %cr0 - jmp 1f -1: + /* * 8. Now restore the virtual mode from flat mode by * adding EIP with PAGE_OFFSET. 
*/ - movl $1f, %edx - jmp *%edx + jmp 1f+__PAGE_OFFSET 1: /* * 9. Balance the stack. And because EAX contain the return value, * we'd better not clobber it. */ - leal efi_rt_function_ptr, %edx - movl (%edx), %ecx - pushl %ecx + pushl (efi_rt_function_ptr) /* - * 10. Push the saved return address onto the stack and return. + * 10. Return to the saved return address. */ - leal saved_return_addr, %edx - movl (%edx), %ecx - pushl %ecx - ret + jmpl *(saved_return_addr) ENDPROC(efi_call_phys) .previous -.data +__INITDATA saved_return_addr: .long 0 efi_rt_function_ptr: diff -urNp linux-2.6.39.1/arch/x86/platform/olpc/olpc_dt.c linux-2.6.39.1/arch/x86/platform/olpc/olpc_dt.c --- linux-2.6.39.1/arch/x86/platform/olpc/olpc_dt.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/platform/olpc/olpc_dt.c 2011-05-22 19:36:30.000000000 -0400 @@ -154,7 +154,7 @@ void * __init prom_early_alloc(unsigned return res; } -static struct of_pdt_ops prom_olpc_ops __initdata = { +static const struct of_pdt_ops prom_olpc_ops = { .nextprop = olpc_dt_nextprop, .getproplen = olpc_dt_getproplen, .getproperty = olpc_dt_getproperty, diff -urNp linux-2.6.39.1/arch/x86/platform/uv/tlb_uv.c linux-2.6.39.1/arch/x86/platform/uv/tlb_uv.c --- linux-2.6.39.1/arch/x86/platform/uv/tlb_uv.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/platform/uv/tlb_uv.c 2011-05-22 19:36:30.000000000 -0400 @@ -342,6 +342,8 @@ static void uv_reset_with_ipi(struct bau cpumask_t mask; struct reset_args reset_args; + pax_track_stack(); + reset_args.sender = sender; cpus_clear(mask); diff -urNp linux-2.6.39.1/arch/x86/power/cpu.c linux-2.6.39.1/arch/x86/power/cpu.c --- linux-2.6.39.1/arch/x86/power/cpu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/power/cpu.c 2011-05-22 19:36:30.000000000 -0400 @@ -130,7 +130,7 @@ static void do_fpu_end(void) static void fix_processor_context(void) { int cpu = smp_processor_id(); - struct tss_struct *t = &per_cpu(init_tss, cpu); + struct tss_struct *t = init_tss + cpu; set_tss_desc(cpu, t); /* * This just modifies memory; should not be @@ -140,7 +140,9 @@ static void fix_processor_context(void) */ #ifdef CONFIG_X86_64 + pax_open_kernel(); get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9; + pax_close_kernel(); syscall_init(); /* This sets MSR_*STAR and related */ #endif diff -urNp linux-2.6.39.1/arch/x86/vdso/Makefile linux-2.6.39.1/arch/x86/vdso/Makefile --- linux-2.6.39.1/arch/x86/vdso/Makefile 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/vdso/Makefile 2011-05-22 19:36:30.000000000 -0400 @@ -123,7 +123,7 @@ quiet_cmd_vdso = VDSO $@ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) GCOV_PROFILE := n # diff -urNp linux-2.6.39.1/arch/x86/vdso/vclock_gettime.c linux-2.6.39.1/arch/x86/vdso/vclock_gettime.c --- linux-2.6.39.1/arch/x86/vdso/vclock_gettime.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/vdso/vclock_gettime.c 2011-05-22 19:36:30.000000000 -0400 @@ -22,24 +22,48 @@ #include #include #include +#include #include "vextern.h" #define gtod vdso_vsyscall_gtod_data +notrace noinline long __vdso_fallback_time(long *t) +{ + long secs; + asm volatile("syscall" + : "=a" (secs) + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory"); + return secs; +} + notrace static long vdso_fallback_gettime(long clock, struct 
timespec *ts) { long ret; asm("syscall" : "=a" (ret) : - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory"); + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory"); return ret; } +notrace static inline cycle_t __vdso_vread_hpet(void) +{ + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); +} + +notrace static inline cycle_t __vdso_vread_tsc(void) +{ + cycle_t ret = (cycle_t)vget_cycles(); + + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last; +} + notrace static inline long vgetns(void) { long v; - cycles_t (*vread)(void); - vread = gtod->clock.vread; - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask; + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]) + v = __vdso_vread_tsc(); + else + v = __vdso_vread_hpet(); + v = (v - gtod->clock.cycle_last) & gtod->clock.mask; return (v * gtod->clock.mult) >> gtod->clock.shift; } @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) { - if (likely(gtod->sysctl_enabled)) + if (likely(gtod->sysctl_enabled && + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) || + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])))) switch (clock) { case CLOCK_REALTIME: if (likely(gtod->clock.vread)) @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid int clock_gettime(clockid_t, struct timespec *) __attribute__((weak, alias("__vdso_clock_gettime"))); -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz) { long ret; - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) { + asm("syscall" : "=a" (ret) : + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory"); + return ret; +} + +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) +{ + if (likely(gtod->sysctl_enabled && + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) || + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])))) + { if (likely(tv != NULL)) { BUILD_BUG_ON(offsetof(struct timeval, tv_usec) != offsetof(struct timespec, tv_nsec) || @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t } return 0; } - asm("syscall" : "=a" (ret) : - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); - return ret; + return __vdso_fallback_gettimeofday(tv, tz); } int gettimeofday(struct timeval *, struct timezone *) __attribute__((weak, alias("__vdso_gettimeofday"))); diff -urNp linux-2.6.39.1/arch/x86/vdso/vdso32-setup.c linux-2.6.39.1/arch/x86/vdso/vdso32-setup.c --- linux-2.6.39.1/arch/x86/vdso/vdso32-setup.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/vdso/vdso32-setup.c 2011-05-22 19:36:30.000000000 -0400 @@ -25,6 +25,7 @@ #include #include #include +#include enum { VDSO_DISABLED = 0, @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m void enable_sep_cpu(void) { int cpu = get_cpu(); - struct tss_struct *tss = &per_cpu(init_tss, cpu); + struct tss_struct *tss = init_tss + cpu; if (!boot_cpu_has(X86_FEATURE_SEP)) { put_cpu(); @@ -249,7 +250,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = 
FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; - gate_vma.vm_page_prot = __P101; + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); /* * Make sure the vDSO gets into every core dump. * Dumping its contents makes post-mortem fully interpretable later @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l if (compat) addr = VDSO_HIGH_BASE; else { - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } } - current->mm->context.vdso = (void *)addr; + current->mm->context.vdso = addr; if (compat_uses_vma || !compat) { /* @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l } current_thread_info()->sysenter_return = - VDSO32_SYMBOL(addr, SYSENTER_RETURN); + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN); up_fail: if (ret) - current->mm->context.vdso = NULL; + current->mm->context.vdso = 0; up_write(&mm->mmap_sem); @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init); const char *arch_vma_name(struct vm_area_struct *vma) { - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) return "[vdso]"; + +#ifdef CONFIG_PAX_SEGMEXEC + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso) + return "[vdso]"; +#endif + return NULL; } @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru * Check to see if the corresponding task was created in compat vdso * mode. */ - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE) + if (mm && mm->context.vdso == VDSO_HIGH_BASE) return &gate_vma; return NULL; } diff -urNp linux-2.6.39.1/arch/x86/vdso/vdso.lds.S linux-2.6.39.1/arch/x86/vdso/vdso.lds.S --- linux-2.6.39.1/arch/x86/vdso/vdso.lds.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/vdso/vdso.lds.S 2011-06-06 17:34:26.000000000 -0400 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK; #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x; #include "vextern.h" #undef VEXTERN + +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x; +VEXTERN(fallback_gettimeofday) +VEXTERN(fallback_time) +VEXTERN(getcpu) +#undef VEXTERN diff -urNp linux-2.6.39.1/arch/x86/vdso/vextern.h linux-2.6.39.1/arch/x86/vdso/vextern.h --- linux-2.6.39.1/arch/x86/vdso/vextern.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/vdso/vextern.h 2011-05-22 19:36:30.000000000 -0400 @@ -11,6 +11,5 @@ put into vextern.h and be referenced as a pointer with vdso prefix. The main kernel later fills in the values. 
*/ -VEXTERN(jiffies) VEXTERN(vgetcpu_mode) VEXTERN(vsyscall_gtod_data) diff -urNp linux-2.6.39.1/arch/x86/vdso/vma.c linux-2.6.39.1/arch/x86/vdso/vma.c --- linux-2.6.39.1/arch/x86/vdso/vma.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/vdso/vma.c 2011-05-22 19:36:30.000000000 -0400 @@ -58,7 +58,7 @@ static int __init init_vdso_vars(void) if (!vbase) goto oom; - if (memcmp(vbase, "\177ELF", 4)) { + if (memcmp(vbase, ELFMAG, SELFMAG)) { printk("VDSO: I'm broken; not ELF\n"); vdso_enabled = 0; } @@ -118,7 +118,7 @@ int arch_setup_additional_pages(struct l goto up_fail; } - current->mm->context.vdso = (void *)addr; + current->mm->context.vdso = addr; ret = install_special_mapping(mm, addr, vdso_size, VM_READ|VM_EXEC| @@ -126,7 +126,7 @@ int arch_setup_additional_pages(struct l VM_ALWAYSDUMP, vdso_pages); if (ret) { - current->mm->context.vdso = NULL; + current->mm->context.vdso = 0; goto up_fail; } @@ -134,10 +134,3 @@ up_fail: up_write(&mm->mmap_sem); return ret; } - -static __init int vdso_setup(char *s) -{ - vdso_enabled = simple_strtoul(s, NULL, 0); - return 0; -} -__setup("vdso=", vdso_setup); diff -urNp linux-2.6.39.1/arch/x86/xen/enlighten.c linux-2.6.39.1/arch/x86/xen/enlighten.c --- linux-2.6.39.1/arch/x86/xen/enlighten.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/xen/enlighten.c 2011-05-22 19:36:30.000000000 -0400 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info); struct shared_info xen_dummy_shared_info; -void *xen_initial_gdt; - RESERVE_BRK(shared_info_page_brk, PAGE_SIZE); __read_mostly int xen_have_vector_callback; EXPORT_SYMBOL_GPL(xen_have_vector_callback); @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic #endif }; -static void xen_reboot(int reason) +static __noreturn void xen_reboot(int reason) { struct sched_shutdown r = { .reason = reason }; @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason) BUG(); } -static void xen_restart(char *msg) +static __noreturn void xen_restart(char *msg) { xen_reboot(SHUTDOWN_reboot); } -static void xen_emergency_restart(void) +static __noreturn void xen_emergency_restart(void) { xen_reboot(SHUTDOWN_reboot); } -static void xen_machine_halt(void) +static __noreturn void xen_machine_halt(void) { xen_reboot(SHUTDOWN_poweroff); } @@ -1127,7 +1125,17 @@ asmlinkage void __init xen_start_kernel( __userpte_alloc_gfp &= ~__GFP_HIGHMEM; /* Work out if we support NX */ - x86_configure_nx(); +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 && + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) { + unsigned l, h; + + __supported_pte_mask |= _PAGE_NX; + rdmsr(MSR_EFER, l, h); + l |= EFER_NX; + wrmsr(MSR_EFER, l, h); + } +#endif xen_setup_features(); @@ -1158,13 +1166,6 @@ asmlinkage void __init xen_start_kernel( machine_ops = xen_machine_ops; - /* - * The only reliable way to retain the initial address of the - * percpu gdt_page is to remember it here, so we can go and - * mark it RW later, when the initial percpu area is freed. 
- */ - xen_initial_gdt = &per_cpu(gdt_page, 0); - xen_smp_init(); #ifdef CONFIG_ACPI_NUMA diff -urNp linux-2.6.39.1/arch/x86/xen/mmu.c linux-2.6.39.1/arch/x86/xen/mmu.c --- linux-2.6.39.1/arch/x86/xen/mmu.c 2011-06-03 00:04:13.000000000 -0400 +++ linux-2.6.39.1/arch/x86/xen/mmu.c 2011-06-03 00:32:05.000000000 -0400 @@ -1791,6 +1791,8 @@ __init pgd_t *xen_setup_kernel_pagetable convert_pfn_mfn(init_level4_pgt); convert_pfn_mfn(level3_ident_pgt); convert_pfn_mfn(level3_kernel_pgt); + convert_pfn_mfn(level3_vmalloc_pgt); + convert_pfn_mfn(level3_vmemmap_pgt); l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); @@ -1809,7 +1811,10 @@ __init pgd_t *xen_setup_kernel_pagetable set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO); set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO); set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); diff -urNp linux-2.6.39.1/arch/x86/xen/pci-swiotlb-xen.c linux-2.6.39.1/arch/x86/xen/pci-swiotlb-xen.c --- linux-2.6.39.1/arch/x86/xen/pci-swiotlb-xen.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/xen/pci-swiotlb-xen.c 2011-05-22 19:36:30.000000000 -0400 @@ -10,7 +10,7 @@ int xen_swiotlb __read_mostly; -static struct dma_map_ops xen_swiotlb_dma_ops = { +static const struct dma_map_ops xen_swiotlb_dma_ops = { .mapping_error = xen_swiotlb_dma_mapping_error, .alloc_coherent = xen_swiotlb_alloc_coherent, .free_coherent = xen_swiotlb_free_coherent, diff -urNp linux-2.6.39.1/arch/x86/xen/smp.c linux-2.6.39.1/arch/x86/xen/smp.c --- linux-2.6.39.1/arch/x86/xen/smp.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/xen/smp.c 2011-05-22 19:36:30.000000000 -0400 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_ { BUG_ON(smp_processor_id() != 0); native_smp_prepare_boot_cpu(); - - /* We've switched to the "real" per-cpu gdt, so make sure the - old memory can be recycled */ - make_lowmem_page_readwrite(xen_initial_gdt); - xen_filter_cpu_maps(); xen_setup_vcpu_info_placement(); } @@ -259,12 +254,12 @@ cpu_initialize_context(unsigned int cpu, gdt = get_cpu_gdt_table(cpu); ctxt->flags = VGCF_IN_KERNEL; - ctxt->user_regs.ds = __USER_DS; - ctxt->user_regs.es = __USER_DS; + ctxt->user_regs.ds = __KERNEL_DS; + ctxt->user_regs.es = __KERNEL_DS; ctxt->user_regs.ss = __KERNEL_DS; #ifdef CONFIG_X86_32 ctxt->user_regs.fs = __KERNEL_PERCPU; - ctxt->user_regs.gs = __KERNEL_STACK_CANARY; + savesegment(gs, ctxt->user_regs.gs); #else ctxt->gs_base_kernel = per_cpu_offset(cpu); #endif @@ -315,13 +310,12 @@ static int __cpuinit xen_cpu_up(unsigned int rc; per_cpu(current_task, cpu) = idle; + per_cpu(current_tinfo, cpu) = &idle->tinfo; #ifdef CONFIG_X86_32 irq_ctx_init(cpu); #else clear_tsk_thread_flag(idle, TIF_FORK); - per_cpu(kernel_stack, cpu) = - (unsigned long)task_stack_page(idle) - - KERNEL_STACK_OFFSET + THREAD_SIZE; + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE; #endif xen_setup_runstate_info(cpu); xen_setup_timer(cpu); diff -urNp linux-2.6.39.1/arch/x86/xen/xen-asm_32.S linux-2.6.39.1/arch/x86/xen/xen-asm_32.S --- linux-2.6.39.1/arch/x86/xen/xen-asm_32.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/xen/xen-asm_32.S 2011-05-22 19:36:30.000000000 
-0400 @@ -83,14 +83,14 @@ ENTRY(xen_iret) ESP_OFFSET=4 # bytes pushed onto stack /* - * Store vcpu_info pointer for easy access. Do it this way to - * avoid having to reload %fs + * Store vcpu_info pointer for easy access. */ #ifdef CONFIG_SMP - GET_THREAD_INFO(%eax) - movl TI_cpu(%eax), %eax - movl __per_cpu_offset(,%eax,4), %eax - mov xen_vcpu(%eax), %eax + push %fs + mov $(__KERNEL_PERCPU), %eax + mov %eax, %fs + mov PER_CPU_VAR(xen_vcpu), %eax + pop %fs #else movl xen_vcpu, %eax #endif diff -urNp linux-2.6.39.1/arch/x86/xen/xen-head.S linux-2.6.39.1/arch/x86/xen/xen-head.S --- linux-2.6.39.1/arch/x86/xen/xen-head.S 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/xen/xen-head.S 2011-05-22 19:36:30.000000000 -0400 @@ -19,6 +19,17 @@ ENTRY(startup_xen) #ifdef CONFIG_X86_32 mov %esi,xen_start_info mov $init_thread_union+THREAD_SIZE,%esp +#ifdef CONFIG_SMP + movl $cpu_gdt_table,%edi + movl $__per_cpu_load,%eax + movw %ax,__KERNEL_PERCPU + 2(%edi) + rorl $16,%eax + movb %al,__KERNEL_PERCPU + 4(%edi) + movb %ah,__KERNEL_PERCPU + 7(%edi) + movl $__per_cpu_end - 1,%eax + subl $__per_cpu_start,%eax + movw %ax,__KERNEL_PERCPU + 0(%edi) +#endif #else mov %rsi,xen_start_info mov $init_thread_union+THREAD_SIZE,%rsp diff -urNp linux-2.6.39.1/arch/x86/xen/xen-ops.h linux-2.6.39.1/arch/x86/xen/xen-ops.h --- linux-2.6.39.1/arch/x86/xen/xen-ops.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/arch/x86/xen/xen-ops.h 2011-05-22 19:36:30.000000000 -0400 @@ -10,8 +10,6 @@ extern const char xen_hypervisor_callback[]; extern const char xen_failsafe_callback[]; -extern void *xen_initial_gdt; - struct trap_info; void xen_copy_trap_info(struct trap_info *traps); diff -urNp linux-2.6.39.1/block/blk-iopoll.c linux-2.6.39.1/block/blk-iopoll.c --- linux-2.6.39.1/block/blk-iopoll.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/block/blk-iopoll.c 2011-05-22 19:36:30.000000000 -0400 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo } EXPORT_SYMBOL(blk_iopoll_complete); -static void blk_iopoll_softirq(struct softirq_action *h) +static void blk_iopoll_softirq(void) { struct list_head *list = &__get_cpu_var(blk_cpu_iopoll); int rearm = 0, budget = blk_iopoll_budget; diff -urNp linux-2.6.39.1/block/blk-map.c linux-2.6.39.1/block/blk-map.c --- linux-2.6.39.1/block/blk-map.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/block/blk-map.c 2011-05-22 19:36:30.000000000 -0400 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue if (!len || !kbuf) return -EINVAL; - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf); if (do_copy) bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); else diff -urNp linux-2.6.39.1/block/blk-softirq.c linux-2.6.39.1/block/blk-softirq.c --- linux-2.6.39.1/block/blk-softirq.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/block/blk-softirq.c 2011-05-22 19:36:30.000000000 -0400 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, * Softirq action handler - move entries to local list and loop over them * while passing them to the queue registered handler. 
*/ -static void blk_done_softirq(struct softirq_action *h) +static void blk_done_softirq(void) { struct list_head *cpu_list, local_list; diff -urNp linux-2.6.39.1/block/bsg.c linux-2.6.39.1/block/bsg.c --- linux-2.6.39.1/block/bsg.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/block/bsg.c 2011-05-22 19:36:30.000000000 -0400 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r struct sg_io_v4 *hdr, struct bsg_device *bd, fmode_t has_write_perm) { + unsigned char tmpcmd[sizeof(rq->__cmd)]; + unsigned char *cmdptr; + if (hdr->request_len > BLK_MAX_CDB) { rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); if (!rq->cmd) return -ENOMEM; - } + cmdptr = rq->cmd; + } else + cmdptr = tmpcmd; - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request, + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request, hdr->request_len)) return -EFAULT; + if (cmdptr != rq->cmd) + memcpy(rq->cmd, cmdptr, hdr->request_len); + if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { if (blk_verify_command(rq->cmd, has_write_perm)) return -EPERM; diff -urNp linux-2.6.39.1/block/scsi_ioctl.c linux-2.6.39.1/block/scsi_ioctl.c --- linux-2.6.39.1/block/scsi_ioctl.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/block/scsi_ioctl.c 2011-05-22 19:36:30.000000000 -0400 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command); static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, struct sg_io_hdr *hdr, fmode_t mode) { - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) + unsigned char tmpcmd[sizeof(rq->__cmd)]; + unsigned char *cmdptr; + + if (rq->cmd != rq->__cmd) + cmdptr = rq->cmd; + else + cmdptr = tmpcmd; + + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len)) return -EFAULT; + + if (cmdptr != rq->cmd) + memcpy(rq->cmd, cmdptr, hdr->cmd_len); + if (blk_verify_command(rq->cmd, mode & FMODE_WRITE)) return -EPERM; @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue * int err; unsigned int in_len, out_len, bytes, opcode, cmdlen; char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE]; + unsigned char tmpcmd[sizeof(rq->__cmd)]; + unsigned char *cmdptr; if (!sic) return -EINVAL; @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue * */ err = -EFAULT; rq->cmd_len = cmdlen; - if (copy_from_user(rq->cmd, sic->data, cmdlen)) + + if (rq->cmd != rq->__cmd) + cmdptr = rq->cmd; + else + cmdptr = tmpcmd; + + if (copy_from_user(cmdptr, sic->data, cmdlen)) goto error; + if (rq->cmd != cmdptr) + memcpy(rq->cmd, cmdptr, cmdlen); + if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) goto error; diff -urNp linux-2.6.39.1/crypto/serpent.c linux-2.6.39.1/crypto/serpent.c --- linux-2.6.39.1/crypto/serpent.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/crypto/serpent.c 2011-05-22 19:36:30.000000000 -0400 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_ u32 r0,r1,r2,r3,r4; int i; + pax_track_stack(); + /* Copy key, add padding */ for (i = 0; i < keylen; ++i) diff -urNp linux-2.6.39.1/Documentation/dontdiff linux-2.6.39.1/Documentation/dontdiff --- linux-2.6.39.1/Documentation/dontdiff 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/Documentation/dontdiff 2011-05-22 19:36:30.000000000 -0400 @@ -1,13 +1,16 @@ *.a *.aux *.bin +*.cis *.cpio *.csp +*.dbg *.dsp *.dvi *.elf *.eps *.fw +*.gcno *.gen.S *.gif *.grep @@ -38,8 +41,10 @@ *.tab.h *.tex *.ver +*.vim *.xml *_MODULES +*_reg_safe.h *_vga16.c *~ *.9 @@ -49,11 +54,16 @@ 53c700_d.h CVS ChangeSet +GPATH +GRTAGS +GSYMS +GTAGS Image Kerntypes Module.markers Module.symvers PENDING +PERF* SCCS 
System.map* TAGS @@ -80,8 +90,11 @@ btfixupprep build bvmlinux bzImage* +capability_names.h capflags.c classlist.h* +clut_vga16.c +common-cmds.h comp*.log compile.h* conf @@ -106,16 +119,19 @@ fore200e_mkfirm fore200e_pca_fw.c* gconf gen-devlist +gen-kdb_cmds.c gen_crc32table gen_init_cpio generated genheaders genksyms *_gray256.c +hash ihex2fw ikconfig.h* inat-tables.c initramfs_data.cpio +initramfs_data.cpio.bz2 initramfs_data.cpio.gz initramfs_list int16.c @@ -125,7 +141,6 @@ int32.c int4.c int8.c kallsyms -kconfig keywords.c ksym.c* ksym.h* @@ -149,7 +164,9 @@ mkboot mkbugboot mkcpustr mkdep +mkpiggy mkprep +mkregtable mktables mktree modpost @@ -165,6 +182,7 @@ parse.h patches* pca200e.bin pca200e_ecd.bin2 +perf-archive piggy.gz piggyback piggy.S @@ -180,7 +198,9 @@ r600_reg_safe.h raid6altivec*.c raid6int*.c raid6tables.c +regdb.c relocs +rlim_names.h rn50_reg_safe.h rs600_reg_safe.h rv515_reg_safe.h @@ -189,6 +209,7 @@ setup setup.bin setup.elf sImage +slabinfo sm_tbl* split-include syscalltab.h @@ -213,13 +234,17 @@ version.h* vmlinux vmlinux-* vmlinux.aout +vmlinux.bin.all +vmlinux.bin.bz2 vmlinux.lds +vmlinux.relocs voffset.h vsyscall.lds vsyscall_32.lds wanxlfw.inc uImage unifdef +utsrelease.h wakeup.bin wakeup.elf wakeup.lds diff -urNp linux-2.6.39.1/Documentation/filesystems/configfs/configfs_example_macros.c linux-2.6.39.1/Documentation/filesystems/configfs/configfs_example_macros.c --- linux-2.6.39.1/Documentation/filesystems/configfs/configfs_example_macros.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/Documentation/filesystems/configfs/configfs_example_macros.c 2011-05-22 19:36:30.000000000 -0400 @@ -368,7 +368,7 @@ static struct configfs_item_operations g * Note that, since no extra work is required on ->drop_item(), * no ->drop_item() is provided. */ -static struct configfs_group_operations group_children_group_ops = { +static const struct configfs_group_operations group_children_group_ops = { .make_group = group_children_make_group, }; diff -urNp linux-2.6.39.1/Documentation/filesystems/sysfs.txt linux-2.6.39.1/Documentation/filesystems/sysfs.txt --- linux-2.6.39.1/Documentation/filesystems/sysfs.txt 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/Documentation/filesystems/sysfs.txt 2011-05-22 19:36:30.000000000 -0400 @@ -125,8 +125,8 @@ set of sysfs operations for forwarding r show and store methods of the attribute owners. struct sysfs_ops { - ssize_t (*show)(struct kobject *, struct attribute *, char *); - ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); + ssize_t (* const show)(struct kobject *, struct attribute *, char *); + ssize_t (* const store)(struct kobject *, struct attribute *, const char *, size_t); }; [ Subsystems should have already defined a struct kobj_type as a diff -urNp linux-2.6.39.1/Documentation/kernel-parameters.txt linux-2.6.39.1/Documentation/kernel-parameters.txt --- linux-2.6.39.1/Documentation/kernel-parameters.txt 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/Documentation/kernel-parameters.txt 2011-05-22 19:36:30.000000000 -0400 @@ -1879,6 +1879,13 @@ bytes respectively. Such letter suffixes the specified number of seconds. This is to be used if your oopses keep scrolling off the screen. + pax_nouderef [X86] disables UDEREF. Most likely needed under certain + virtualization environments that don't cope well with the + expand down segment used by UDEREF on X86-32 or the frequent + page table updates on X86-64. + + pax_softmode= 0/1 to disable/enable PaX softmode on boot already. 
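Illustrative sketch, not part of the patch: the two PaX options documented above are ordinary kernel command-line parameters, so enabling soft mode and disabling UDEREF at boot is just a matter of appending e.g. "pax_softmode=1 pax_nouderef" to the kernel line in the bootloader. A parameter of this kind is typically wired up with an early_param() handler along the following lines; the flag and function names below are assumptions for illustration only, since the actual PaX parsing code is not contained in this hunk.

#include <linux/init.h>
#include <linux/kernel.h>

static int pax_softmode;	/* assumed flag name, for illustration only */

/* accept "pax_softmode=0" or "pax_softmode=1" on the boot command line */
static int __init setup_pax_softmode(char *str)
{
	if (get_option(&str, &pax_softmode) != 1)
		return -EINVAL;
	return 0;
}
early_param("pax_softmode", setup_pax_softmode);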
+ pcbit= [HW,ISDN] pcd. [PARIDE] diff -urNp linux-2.6.39.1/drivers/acpi/acpi_ipmi.c linux-2.6.39.1/drivers/acpi/acpi_ipmi.c --- linux-2.6.39.1/drivers/acpi/acpi_ipmi.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/acpi_ipmi.c 2011-05-22 19:36:30.000000000 -0400 @@ -70,7 +70,7 @@ struct acpi_ipmi_device { struct ipmi_driver_data { struct list_head ipmi_devices; struct ipmi_smi_watcher bmc_events; - struct ipmi_user_hndl ipmi_hndlrs; + const struct ipmi_user_hndl ipmi_hndlrs; struct mutex ipmi_lock; }; diff -urNp linux-2.6.39.1/drivers/acpi/apei/cper.c linux-2.6.39.1/drivers/acpi/apei/cper.c --- linux-2.6.39.1/drivers/acpi/apei/cper.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/apei/cper.c 2011-05-22 19:36:30.000000000 -0400 @@ -38,12 +38,12 @@ */ u64 cper_next_record_id(void) { - static atomic64_t seq; + static atomic64_unchecked_t seq; - if (!atomic64_read(&seq)) - atomic64_set(&seq, ((u64)get_seconds()) << 32); + if (!atomic64_read_unchecked(&seq)) + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32); - return atomic64_inc_return(&seq); + return atomic64_inc_return_unchecked(&seq); } EXPORT_SYMBOL_GPL(cper_next_record_id); diff -urNp linux-2.6.39.1/drivers/acpi/battery.c linux-2.6.39.1/drivers/acpi/battery.c --- linux-2.6.39.1/drivers/acpi/battery.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/battery.c 2011-05-22 19:36:30.000000000 -0400 @@ -864,7 +864,7 @@ DECLARE_FILE_FUNCTIONS(alarm); } static struct battery_file { - struct file_operations ops; + const struct file_operations ops; mode_t mode; const char *name; } acpi_battery_file[] = { diff -urNp linux-2.6.39.1/drivers/acpi/dock.c linux-2.6.39.1/drivers/acpi/dock.c --- linux-2.6.39.1/drivers/acpi/dock.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/dock.c 2011-05-22 19:36:30.000000000 -0400 @@ -77,7 +77,7 @@ struct dock_dependent_device { struct list_head list; struct list_head hotplug_list; acpi_handle handle; - struct acpi_dock_ops *ops; + const struct acpi_dock_ops *ops; void *context; }; @@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi * the dock driver after _DCK is executed. 
*/ int -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops, +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops, void *context) { struct dock_dependent_device *dd; diff -urNp linux-2.6.39.1/drivers/acpi/ec_sys.c linux-2.6.39.1/drivers/acpi/ec_sys.c --- linux-2.6.39.1/drivers/acpi/ec_sys.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/ec_sys.c 2011-05-22 19:36:30.000000000 -0400 @@ -92,7 +92,7 @@ static ssize_t acpi_ec_write_io(struct f return count; } -static struct file_operations acpi_ec_io_ops = { +static const struct file_operations acpi_ec_io_ops = { .owner = THIS_MODULE, .open = acpi_ec_open_io, .read = acpi_ec_read_io, diff -urNp linux-2.6.39.1/drivers/acpi/fan.c linux-2.6.39.1/drivers/acpi/fan.c --- linux-2.6.39.1/drivers/acpi/fan.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/fan.c 2011-05-22 19:36:30.000000000 -0400 @@ -110,7 +110,7 @@ fan_set_cur_state(struct thermal_cooling return result; } -static struct thermal_cooling_device_ops fan_cooling_ops = { +static const struct thermal_cooling_device_ops fan_cooling_ops = { .get_max_state = fan_get_max_state, .get_cur_state = fan_get_cur_state, .set_cur_state = fan_set_cur_state, diff -urNp linux-2.6.39.1/drivers/acpi/power_meter.c linux-2.6.39.1/drivers/acpi/power_meter.c --- linux-2.6.39.1/drivers/acpi/power_meter.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/power_meter.c 2011-05-22 19:36:30.000000000 -0400 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d return res; temp /= 1000; - if (temp < 0) - return -EINVAL; mutex_lock(&resource->lock); resource->trip[attr->index - 7] = temp; diff -urNp linux-2.6.39.1/drivers/acpi/proc.c linux-2.6.39.1/drivers/acpi/proc.c --- linux-2.6.39.1/drivers/acpi/proc.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/proc.c 2011-05-22 19:36:30.000000000 -0400 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f size_t count, loff_t * ppos) { struct list_head *node, *next; - char strbuf[5]; - char str[5] = ""; - unsigned int len = count; - - if (len > 4) - len = 4; - if (len < 0) - return -EFAULT; + char strbuf[5] = {0}; - if (copy_from_user(strbuf, buffer, len)) + if (count > 4) + count = 4; + if (copy_from_user(strbuf, buffer, count)) return -EFAULT; - strbuf[len] = '\0'; - sscanf(strbuf, "%s", str); + strbuf[count] = '\0'; mutex_lock(&acpi_device_lock); list_for_each_safe(node, next, &acpi_wakeup_device_list) { @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f if (!dev->wakeup.flags.valid) continue; - if (!strncmp(dev->pnp.bus_id, str, 4)) { + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) { if (device_can_wakeup(&dev->dev)) { bool enable = !device_may_wakeup(&dev->dev); device_set_wakeup_enable(&dev->dev, enable); diff -urNp linux-2.6.39.1/drivers/acpi/processor_driver.c linux-2.6.39.1/drivers/acpi/processor_driver.c --- linux-2.6.39.1/drivers/acpi/processor_driver.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/processor_driver.c 2011-05-22 19:36:30.000000000 -0400 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add( return 0; #endif - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0)); + BUG_ON(pr->id >= nr_cpu_ids); /* * Buggy BIOS check diff -urNp linux-2.6.39.1/drivers/acpi/processor_idle.c linux-2.6.39.1/drivers/acpi/processor_idle.c --- linux-2.6.39.1/drivers/acpi/processor_idle.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/processor_idle.c 2011-05-22 19:36:30.000000000 
-0400 @@ -121,7 +121,7 @@ static struct dmi_system_id __cpuinitdat DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, (void *)1}, - {}, + {} }; diff -urNp linux-2.6.39.1/drivers/acpi/processor_thermal.c linux-2.6.39.1/drivers/acpi/processor_thermal.c --- linux-2.6.39.1/drivers/acpi/processor_thermal.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/processor_thermal.c 2011-05-22 19:36:30.000000000 -0400 @@ -244,7 +244,7 @@ processor_set_cur_state(struct thermal_c return result; } -struct thermal_cooling_device_ops processor_cooling_ops = { +const struct thermal_cooling_device_ops processor_cooling_ops = { .get_max_state = processor_get_max_state, .get_cur_state = processor_get_cur_state, .set_cur_state = processor_set_cur_state, diff -urNp linux-2.6.39.1/drivers/acpi/sysfs.c linux-2.6.39.1/drivers/acpi/sysfs.c --- linux-2.6.39.1/drivers/acpi/sysfs.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/sysfs.c 2011-05-22 19:36:30.000000000 -0400 @@ -149,12 +149,12 @@ static int param_get_debug_level(char *b return result; } -static struct kernel_param_ops param_ops_debug_layer = { +static const struct kernel_param_ops param_ops_debug_layer = { .set = param_set_uint, .get = param_get_debug_layer, }; -static struct kernel_param_ops param_ops_debug_level = { +static const struct kernel_param_ops param_ops_debug_level = { .set = param_set_uint, .get = param_get_debug_level, }; diff -urNp linux-2.6.39.1/drivers/acpi/thermal.c linux-2.6.39.1/drivers/acpi/thermal.c --- linux-2.6.39.1/drivers/acpi/thermal.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/thermal.c 2011-05-22 19:36:30.000000000 -0400 @@ -812,7 +812,7 @@ acpi_thermal_unbind_cooling_device(struc thermal_zone_unbind_cooling_device); } -static struct thermal_zone_device_ops acpi_thermal_zone_ops = { +static const struct thermal_zone_device_ops acpi_thermal_zone_ops = { .bind = acpi_thermal_bind_cooling_device, .unbind = acpi_thermal_unbind_cooling_device, .get_temp = thermal_get_temp, diff -urNp linux-2.6.39.1/drivers/acpi/video.c linux-2.6.39.1/drivers/acpi/video.c --- linux-2.6.39.1/drivers/acpi/video.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/acpi/video.c 2011-05-22 19:36:30.000000000 -0400 @@ -308,7 +308,7 @@ video_set_cur_state(struct thermal_cooli return acpi_video_device_lcd_set_level(video, level); } -static struct thermal_cooling_device_ops video_cooling_ops = { +static const struct thermal_cooling_device_ops video_cooling_ops = { .get_max_state = video_get_max_state, .get_cur_state = video_get_cur_state, .set_cur_state = video_set_cur_state, diff -urNp linux-2.6.39.1/drivers/ata/acard-ahci.c linux-2.6.39.1/drivers/ata/acard-ahci.c --- linux-2.6.39.1/drivers/ata/acard-ahci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/acard-ahci.c 2011-05-22 19:36:30.000000000 -0400 @@ -87,7 +87,7 @@ static struct scsi_host_template acard_a AHCI_SHT("acard-ahci"), }; -static struct ata_port_operations acard_ops = { +static const struct ata_port_operations acard_ops = { .inherits = &ahci_ops, .qc_prep = acard_ahci_qc_prep, .qc_fill_rtf = acard_ahci_qc_fill_rtf, diff -urNp linux-2.6.39.1/drivers/ata/ahci.c linux-2.6.39.1/drivers/ata/ahci.c --- linux-2.6.39.1/drivers/ata/ahci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/ahci.c 2011-05-22 19:36:30.000000000 -0400 @@ -94,17 +94,17 @@ static struct scsi_host_template ahci_sh AHCI_SHT("ahci"), }; -static struct 
ata_port_operations ahci_vt8251_ops = { +static const struct ata_port_operations ahci_vt8251_ops = { .inherits = &ahci_ops, .hardreset = ahci_vt8251_hardreset, }; -static struct ata_port_operations ahci_p5wdh_ops = { +static const struct ata_port_operations ahci_p5wdh_ops = { .inherits = &ahci_ops, .hardreset = ahci_p5wdh_hardreset, }; -static struct ata_port_operations ahci_sb600_ops = { +static const struct ata_port_operations ahci_sb600_ops = { .inherits = &ahci_ops, .softreset = ahci_sb600_softreset, .pmp_softreset = ahci_sb600_softreset, diff -urNp linux-2.6.39.1/drivers/ata/ahci.h linux-2.6.39.1/drivers/ata/ahci.h --- linux-2.6.39.1/drivers/ata/ahci.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/ahci.h 2011-05-22 19:36:30.000000000 -0400 @@ -311,7 +311,7 @@ extern struct device_attribute *ahci_sde .shost_attrs = ahci_shost_attrs, \ .sdev_attrs = ahci_sdev_attrs -extern struct ata_port_operations ahci_ops; +extern const struct ata_port_operations ahci_ops; void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, u32 opts); diff -urNp linux-2.6.39.1/drivers/ata/ata_generic.c linux-2.6.39.1/drivers/ata/ata_generic.c --- linux-2.6.39.1/drivers/ata/ata_generic.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/ata_generic.c 2011-05-22 19:36:30.000000000 -0400 @@ -101,7 +101,7 @@ static struct scsi_host_template generic ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations generic_port_ops = { +static const struct ata_port_operations generic_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_unknown, .set_mode = generic_set_mode, diff -urNp linux-2.6.39.1/drivers/ata/ata_piix.c linux-2.6.39.1/drivers/ata/ata_piix.c --- linux-2.6.39.1/drivers/ata/ata_piix.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/ata_piix.c 2011-05-22 19:36:30.000000000 -0400 @@ -335,12 +335,12 @@ static struct scsi_host_template piix_sh ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations piix_sata_ops = { +static const struct ata_port_operations piix_sata_ops = { .inherits = &ata_bmdma32_port_ops, .sff_irq_check = piix_irq_check, }; -static struct ata_port_operations piix_pata_ops = { +static const struct ata_port_operations piix_pata_ops = { .inherits = &piix_sata_ops, .cable_detect = ata_cable_40wire, .set_piomode = piix_set_piomode, @@ -348,12 +348,12 @@ static struct ata_port_operations piix_p .prereset = piix_pata_prereset, }; -static struct ata_port_operations piix_vmw_ops = { +static const struct ata_port_operations piix_vmw_ops = { .inherits = &piix_pata_ops, .bmdma_status = piix_vmw_bmdma_status, }; -static struct ata_port_operations ich_pata_ops = { +static const struct ata_port_operations ich_pata_ops = { .inherits = &piix_pata_ops, .cable_detect = ich_pata_cable_detect, .set_dmamode = ich_set_dmamode, @@ -369,7 +369,7 @@ static struct scsi_host_template piix_si .shost_attrs = piix_sidpr_shost_attrs, }; -static struct ata_port_operations piix_sidpr_sata_ops = { +static const struct ata_port_operations piix_sidpr_sata_ops = { .inherits = &piix_sata_ops, .hardreset = sata_std_hardreset, .scr_read = piix_sidpr_scr_read, diff -urNp linux-2.6.39.1/drivers/ata/libahci.c linux-2.6.39.1/drivers/ata/libahci.c --- linux-2.6.39.1/drivers/ata/libahci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/libahci.c 2011-05-22 19:36:30.000000000 -0400 @@ -141,7 +141,7 @@ struct device_attribute *ahci_sdev_attrs }; EXPORT_SYMBOL_GPL(ahci_sdev_attrs); -struct ata_port_operations ahci_ops = { 
+const struct ata_port_operations ahci_ops = { .inherits = &sata_pmp_port_ops, .qc_defer = ahci_pmp_qc_defer, diff -urNp linux-2.6.39.1/drivers/ata/libata-acpi.c linux-2.6.39.1/drivers/ata/libata-acpi.c --- linux-2.6.39.1/drivers/ata/libata-acpi.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/libata-acpi.c 2011-05-22 19:36:30.000000000 -0400 @@ -218,12 +218,12 @@ static void ata_acpi_dev_uevent(acpi_han ata_acpi_uevent(dev->link->ap, dev, event); } -static struct acpi_dock_ops ata_acpi_dev_dock_ops = { +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = { .handler = ata_acpi_dev_notify_dock, .uevent = ata_acpi_dev_uevent, }; -static struct acpi_dock_ops ata_acpi_ap_dock_ops = { +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = { .handler = ata_acpi_ap_notify_dock, .uevent = ata_acpi_ap_uevent, }; diff -urNp linux-2.6.39.1/drivers/ata/libata-core.c linux-2.6.39.1/drivers/ata/libata-core.c --- linux-2.6.39.1/drivers/ata/libata-core.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/libata-core.c 2011-05-22 19:36:30.000000000 -0400 @@ -4747,7 +4747,7 @@ void ata_qc_free(struct ata_queued_cmd * struct ata_port *ap; unsigned int tag; - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ ap = qc->ap; qc->flags = 0; @@ -4763,7 +4763,7 @@ void __ata_qc_complete(struct ata_queued struct ata_port *ap; struct ata_link *link; - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); ap = qc->ap; link = qc->dev->link; @@ -5756,7 +5756,7 @@ static void ata_host_stop(struct device * LOCKING: * None. */ -static void ata_finalize_port_ops(struct ata_port_operations *ops) +static void ata_finalize_port_ops(const struct ata_port_operations *ops) { static DEFINE_SPINLOCK(lock); const struct ata_port_operations *cur; @@ -5768,6 +5768,7 @@ static void ata_finalize_port_ops(struct return; spin_lock(&lock); + pax_open_kernel(); for (cur = ops->inherits; cur; cur = cur->inherits) { void **inherit = (void **)cur; @@ -5781,8 +5782,9 @@ static void ata_finalize_port_ops(struct if (IS_ERR(*pp)) *pp = NULL; - ops->inherits = NULL; + ((struct ata_port_operations *)ops)->inherits = NULL; + pax_close_kernel(); spin_unlock(&lock); } @@ -5879,7 +5881,7 @@ int ata_host_start(struct ata_host *host */ /* KILLME - the only user left is ipr */ void ata_host_init(struct ata_host *host, struct device *dev, - unsigned long flags, struct ata_port_operations *ops) + unsigned long flags, const struct ata_port_operations *ops) { spin_lock_init(&host->lock); mutex_init(&host->eh_mutex); @@ -6583,7 +6585,7 @@ static void ata_dummy_error_handler(stru /* truly dummy */ } -struct ata_port_operations ata_dummy_port_ops = { +const struct ata_port_operations ata_dummy_port_ops = { .qc_prep = ata_noop_qc_prep, .qc_issue = ata_dummy_qc_issue, .error_handler = ata_dummy_error_handler, diff -urNp linux-2.6.39.1/drivers/ata/libata-eh.c linux-2.6.39.1/drivers/ata/libata-eh.c --- linux-2.6.39.1/drivers/ata/libata-eh.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/libata-eh.c 2011-05-22 19:36:30.000000000 -0400 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap) { struct ata_link *link; + pax_track_stack(); + ata_for_each_link(link, ap, HOST_FIRST) ata_eh_link_report(link); } @@ -3922,7 +3924,7 @@ void ata_do_eh(struct ata_port *ap, ata_ */ void 
ata_std_error_handler(struct ata_port *ap) { - struct ata_port_operations *ops = ap->ops; + const struct ata_port_operations *ops = ap->ops; ata_reset_fn_t hardreset = ops->hardreset; /* ignore built-in hardreset if SCR access is not available */ diff -urNp linux-2.6.39.1/drivers/ata/libata-pmp.c linux-2.6.39.1/drivers/ata/libata-pmp.c --- linux-2.6.39.1/drivers/ata/libata-pmp.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/libata-pmp.c 2011-05-22 19:36:30.000000000 -0400 @@ -912,7 +912,7 @@ static int sata_pmp_handle_link_fail(str */ static int sata_pmp_eh_recover(struct ata_port *ap) { - struct ata_port_operations *ops = ap->ops; + const struct ata_port_operations *ops = ap->ops; int pmp_tries, link_tries[SATA_PMP_MAX_PORTS]; struct ata_link *pmp_link = &ap->link; struct ata_device *pmp_dev = pmp_link->device; diff -urNp linux-2.6.39.1/drivers/ata/pata_acpi.c linux-2.6.39.1/drivers/ata/pata_acpi.c --- linux-2.6.39.1/drivers/ata/pata_acpi.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_acpi.c 2011-05-22 19:36:30.000000000 -0400 @@ -216,7 +216,7 @@ static struct scsi_host_template pacpi_s ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations pacpi_ops = { +static const struct ata_port_operations pacpi_ops = { .inherits = &ata_bmdma_port_ops, .qc_issue = pacpi_qc_issue, .cable_detect = pacpi_cable_detect, diff -urNp linux-2.6.39.1/drivers/ata/pata_ali.c linux-2.6.39.1/drivers/ata/pata_ali.c --- linux-2.6.39.1/drivers/ata/pata_ali.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_ali.c 2011-05-22 19:36:30.000000000 -0400 @@ -363,7 +363,7 @@ static struct scsi_host_template ali_sht * Port operations for PIO only ALi */ -static struct ata_port_operations ali_early_port_ops = { +static const struct ata_port_operations ali_early_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = ali_set_piomode, @@ -380,7 +380,7 @@ static const struct ata_port_operations * Port operations for DMA capable ALi without cable * detect */ -static struct ata_port_operations ali_20_port_ops = { +static const struct ata_port_operations ali_20_port_ops = { .inherits = &ali_dma_base_ops, .cable_detect = ata_cable_40wire, .mode_filter = ali_20_filter, @@ -391,7 +391,7 @@ static struct ata_port_operations ali_20 /* * Port operations for DMA capable ALi with cable detect */ -static struct ata_port_operations ali_c2_port_ops = { +static const struct ata_port_operations ali_c2_port_ops = { .inherits = &ali_dma_base_ops, .check_atapi_dma = ali_check_atapi_dma, .cable_detect = ali_c2_cable_detect, @@ -402,7 +402,7 @@ static struct ata_port_operations ali_c2 /* * Port operations for DMA capable ALi with cable detect */ -static struct ata_port_operations ali_c4_port_ops = { +static const struct ata_port_operations ali_c4_port_ops = { .inherits = &ali_dma_base_ops, .check_atapi_dma = ali_check_atapi_dma, .cable_detect = ali_c2_cable_detect, @@ -412,7 +412,7 @@ static struct ata_port_operations ali_c4 /* * Port operations for DMA capable ALi with cable detect and LBA48 */ -static struct ata_port_operations ali_c5_port_ops = { +static const struct ata_port_operations ali_c5_port_ops = { .inherits = &ali_dma_base_ops, .check_atapi_dma = ali_check_atapi_dma, .dev_config = ali_warn_atapi_dma, diff -urNp linux-2.6.39.1/drivers/ata/pata_amd.c linux-2.6.39.1/drivers/ata/pata_amd.c --- linux-2.6.39.1/drivers/ata/pata_amd.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_amd.c 2011-05-22 
19:36:31.000000000 -0400 @@ -397,28 +397,28 @@ static const struct ata_port_operations .prereset = amd_pre_reset, }; -static struct ata_port_operations amd33_port_ops = { +static const struct ata_port_operations amd33_port_ops = { .inherits = &amd_base_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = amd33_set_piomode, .set_dmamode = amd33_set_dmamode, }; -static struct ata_port_operations amd66_port_ops = { +static const struct ata_port_operations amd66_port_ops = { .inherits = &amd_base_port_ops, .cable_detect = ata_cable_unknown, .set_piomode = amd66_set_piomode, .set_dmamode = amd66_set_dmamode, }; -static struct ata_port_operations amd100_port_ops = { +static const struct ata_port_operations amd100_port_ops = { .inherits = &amd_base_port_ops, .cable_detect = ata_cable_unknown, .set_piomode = amd100_set_piomode, .set_dmamode = amd100_set_dmamode, }; -static struct ata_port_operations amd133_port_ops = { +static const struct ata_port_operations amd133_port_ops = { .inherits = &amd_base_port_ops, .cable_detect = amd_cable_detect, .set_piomode = amd133_set_piomode, @@ -433,13 +433,13 @@ static const struct ata_port_operations .host_stop = nv_host_stop, }; -static struct ata_port_operations nv100_port_ops = { +static const struct ata_port_operations nv100_port_ops = { .inherits = &nv_base_port_ops, .set_piomode = nv100_set_piomode, .set_dmamode = nv100_set_dmamode, }; -static struct ata_port_operations nv133_port_ops = { +static const struct ata_port_operations nv133_port_ops = { .inherits = &nv_base_port_ops, .set_piomode = nv133_set_piomode, .set_dmamode = nv133_set_dmamode, diff -urNp linux-2.6.39.1/drivers/ata/pata_arasan_cf.c linux-2.6.39.1/drivers/ata/pata_arasan_cf.c --- linux-2.6.39.1/drivers/ata/pata_arasan_cf.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_arasan_cf.c 2011-05-22 19:36:31.000000000 -0400 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str /* Handle platform specific quirks */ if (pdata->quirk) { if (pdata->quirk & CF_BROKEN_PIO) { - ap->ops->set_piomode = NULL; + pax_open_kernel(); + *(void**)&ap->ops->set_piomode = NULL; + pax_close_kernel(); ap->pio_mask = 0; } if (pdata->quirk & CF_BROKEN_MWDMA) diff -urNp linux-2.6.39.1/drivers/ata/pata_artop.c linux-2.6.39.1/drivers/ata/pata_artop.c --- linux-2.6.39.1/drivers/ata/pata_artop.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_artop.c 2011-05-22 19:36:31.000000000 -0400 @@ -312,7 +312,7 @@ static struct scsi_host_template artop_s ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations artop6210_ops = { +static const struct ata_port_operations artop6210_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = artop6210_set_piomode, @@ -321,7 +321,7 @@ static struct ata_port_operations artop6 .qc_defer = artop6210_qc_defer, }; -static struct ata_port_operations artop6260_ops = { +static const struct ata_port_operations artop6260_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = artop6260_cable_detect, .set_piomode = artop6260_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_at32.c linux-2.6.39.1/drivers/ata/pata_at32.c --- linux-2.6.39.1/drivers/ata/pata_at32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_at32.c 2011-05-22 19:36:31.000000000 -0400 @@ -173,7 +173,7 @@ static struct scsi_host_template at32_sh ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations at32_port_ops = { +static const struct ata_port_operations at32_port_ops = { .inherits = 
&ata_sff_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = pata_at32_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_at91.c linux-2.6.39.1/drivers/ata/pata_at91.c --- linux-2.6.39.1/drivers/ata/pata_at91.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_at91.c 2011-05-22 19:36:31.000000000 -0400 @@ -212,7 +212,7 @@ static struct scsi_host_template pata_at ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations pata_at91_port_ops = { +static const struct ata_port_operations pata_at91_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = pata_at91_data_xfer_noirq, diff -urNp linux-2.6.39.1/drivers/ata/pata_atiixp.c linux-2.6.39.1/drivers/ata/pata_atiixp.c --- linux-2.6.39.1/drivers/ata/pata_atiixp.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_atiixp.c 2011-05-22 19:36:31.000000000 -0400 @@ -214,7 +214,7 @@ static struct scsi_host_template atiixp_ .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; -static struct ata_port_operations atiixp_port_ops = { +static const struct ata_port_operations atiixp_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_prep = ata_bmdma_dumb_qc_prep, diff -urNp linux-2.6.39.1/drivers/ata/pata_atp867x.c linux-2.6.39.1/drivers/ata/pata_atp867x.c --- linux-2.6.39.1/drivers/ata/pata_atp867x.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_atp867x.c 2011-05-22 19:36:31.000000000 -0400 @@ -275,7 +275,7 @@ static struct scsi_host_template atp867x ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations atp867x_ops = { +static const struct ata_port_operations atp867x_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = atp867x_cable_detect, .set_piomode = atp867x_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_bf54x.c linux-2.6.39.1/drivers/ata/pata_bf54x.c --- linux-2.6.39.1/drivers/ata/pata_bf54x.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_bf54x.c 2011-05-22 19:36:31.000000000 -0400 @@ -1420,7 +1420,7 @@ static struct scsi_host_template bfin_sh .dma_boundary = ATA_DMA_BOUNDARY, }; -static struct ata_port_operations bfin_pata_ops = { +static const struct ata_port_operations bfin_pata_ops = { .inherits = &ata_bmdma_port_ops, .set_piomode = bfin_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_cmd640.c linux-2.6.39.1/drivers/ata/pata_cmd640.c --- linux-2.6.39.1/drivers/ata/pata_cmd640.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_cmd640.c 2011-05-22 19:36:31.000000000 -0400 @@ -176,7 +176,7 @@ static struct scsi_host_template cmd640_ ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations cmd640_port_ops = { +static const struct ata_port_operations cmd640_port_ops = { .inherits = &ata_sff_port_ops, /* In theory xfer_noirq is not needed once we kill the prefetcher */ .sff_data_xfer = ata_sff_data_xfer_noirq, diff -urNp linux-2.6.39.1/drivers/ata/pata_cmd64x.c linux-2.6.39.1/drivers/ata/pata_cmd64x.c --- linux-2.6.39.1/drivers/ata/pata_cmd64x.c 2011-06-03 00:04:13.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_cmd64x.c 2011-06-03 00:32:05.000000000 -0400 @@ -271,18 +271,18 @@ static const struct ata_port_operations .set_dmamode = cmd64x_set_dmamode, }; -static struct ata_port_operations cmd64x_port_ops = { +static const struct ata_port_operations cmd64x_port_ops = { .inherits = &cmd64x_base_ops, .cable_detect = ata_cable_40wire, }; -static struct ata_port_operations cmd646r1_port_ops = { +static const struct ata_port_operations cmd646r1_port_ops = { .inherits = &cmd64x_base_ops, .bmdma_stop 
= cmd646r1_bmdma_stop, .cable_detect = ata_cable_40wire, }; -static struct ata_port_operations cmd648_port_ops = { +static const struct ata_port_operations cmd648_port_ops = { .inherits = &cmd64x_base_ops, .bmdma_stop = cmd648_bmdma_stop, .cable_detect = cmd648_cable_detect, diff -urNp linux-2.6.39.1/drivers/ata/pata_cs5520.c linux-2.6.39.1/drivers/ata/pata_cs5520.c --- linux-2.6.39.1/drivers/ata/pata_cs5520.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_cs5520.c 2011-05-22 19:36:31.000000000 -0400 @@ -108,7 +108,7 @@ static struct scsi_host_template cs5520_ .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; -static struct ata_port_operations cs5520_port_ops = { +static const struct ata_port_operations cs5520_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_prep = ata_bmdma_dumb_qc_prep, .cable_detect = ata_cable_40wire, diff -urNp linux-2.6.39.1/drivers/ata/pata_cs5530.c linux-2.6.39.1/drivers/ata/pata_cs5530.c --- linux-2.6.39.1/drivers/ata/pata_cs5530.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_cs5530.c 2011-05-22 19:36:31.000000000 -0400 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_ .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; -static struct ata_port_operations cs5530_port_ops = { +static const struct ata_port_operations cs5530_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_prep = ata_bmdma_dumb_qc_prep, diff -urNp linux-2.6.39.1/drivers/ata/pata_cs5535.c linux-2.6.39.1/drivers/ata/pata_cs5535.c --- linux-2.6.39.1/drivers/ata/pata_cs5535.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_cs5535.c 2011-05-22 19:36:31.000000000 -0400 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_ ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations cs5535_port_ops = { +static const struct ata_port_operations cs5535_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = cs5535_cable_detect, .set_piomode = cs5535_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_cs5536.c linux-2.6.39.1/drivers/ata/pata_cs5536.c --- linux-2.6.39.1/drivers/ata/pata_cs5536.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_cs5536.c 2011-05-22 19:36:31.000000000 -0400 @@ -233,7 +233,7 @@ static struct scsi_host_template cs5536_ ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations cs5536_port_ops = { +static const struct ata_port_operations cs5536_port_ops = { .inherits = &ata_bmdma32_port_ops, .cable_detect = cs5536_cable_detect, .set_piomode = cs5536_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_cypress.c linux-2.6.39.1/drivers/ata/pata_cypress.c --- linux-2.6.39.1/drivers/ata/pata_cypress.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_cypress.c 2011-05-22 19:36:31.000000000 -0400 @@ -115,7 +115,7 @@ static struct scsi_host_template cy82c69 ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations cy82c693_port_ops = { +static const struct ata_port_operations cy82c693_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = cy82c693_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_efar.c linux-2.6.39.1/drivers/ata/pata_efar.c --- linux-2.6.39.1/drivers/ata/pata_efar.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_efar.c 2011-05-22 19:36:31.000000000 -0400 @@ -238,7 +238,7 @@ static struct scsi_host_template efar_sh ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations efar_ops = { +static const struct ata_port_operations efar_ops = { .inherits = 
&ata_bmdma_port_ops, .cable_detect = efar_cable_detect, .set_piomode = efar_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_hpt366.c linux-2.6.39.1/drivers/ata/pata_hpt366.c --- linux-2.6.39.1/drivers/ata/pata_hpt366.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_hpt366.c 2011-05-22 19:36:31.000000000 -0400 @@ -276,7 +276,7 @@ static struct scsi_host_template hpt36x_ * Configuration for HPT366/68 */ -static struct ata_port_operations hpt366_port_ops = { +static const struct ata_port_operations hpt366_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = hpt36x_cable_detect, .mode_filter = hpt366_filter, diff -urNp linux-2.6.39.1/drivers/ata/pata_hpt37x.c linux-2.6.39.1/drivers/ata/pata_hpt37x.c --- linux-2.6.39.1/drivers/ata/pata_hpt37x.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_hpt37x.c 2011-05-22 19:36:31.000000000 -0400 @@ -589,7 +589,7 @@ static struct scsi_host_template hpt37x_ * Configuration for HPT370 */ -static struct ata_port_operations hpt370_port_ops = { +static const struct ata_port_operations hpt370_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_stop = hpt370_bmdma_stop, @@ -605,7 +605,7 @@ static struct ata_port_operations hpt370 * Configuration for HPT370A. Close to 370 but less filters */ -static struct ata_port_operations hpt370a_port_ops = { +static const struct ata_port_operations hpt370a_port_ops = { .inherits = &hpt370_port_ops, .mode_filter = hpt370a_filter, }; @@ -615,7 +615,7 @@ static struct ata_port_operations hpt370 * mode setting functionality. */ -static struct ata_port_operations hpt302_port_ops = { +static const struct ata_port_operations hpt302_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_stop = hpt37x_bmdma_stop, @@ -631,7 +631,7 @@ static struct ata_port_operations hpt302 * but we have a mode filter. */ -static struct ata_port_operations hpt372_port_ops = { +static const struct ata_port_operations hpt372_port_ops = { .inherits = &hpt302_port_ops, .mode_filter = hpt372_filter, }; @@ -641,7 +641,7 @@ static struct ata_port_operations hpt372 * but we have a different cable detection procedure for function 1. */ -static struct ata_port_operations hpt374_fn1_port_ops = { +static const struct ata_port_operations hpt374_fn1_port_ops = { .inherits = &hpt372_port_ops, .cable_detect = hpt374_fn1_cable_detect, }; diff -urNp linux-2.6.39.1/drivers/ata/pata_hpt3x2n.c linux-2.6.39.1/drivers/ata/pata_hpt3x2n.c --- linux-2.6.39.1/drivers/ata/pata_hpt3x2n.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_hpt3x2n.c 2011-05-22 19:36:31.000000000 -0400 @@ -350,7 +350,7 @@ static struct scsi_host_template hpt3x2n * Configuration for HPT302N/371N. */ -static struct ata_port_operations hpt3xxn_port_ops = { +static const struct ata_port_operations hpt3xxn_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_stop = hpt3x2n_bmdma_stop, @@ -368,7 +368,7 @@ static struct ata_port_operations hpt3xx * Configuration for HPT372N. Same as 302N/371N but we have a mode filter. 
*/ -static struct ata_port_operations hpt372n_port_ops = { +static const struct ata_port_operations hpt372n_port_ops = { .inherits = &hpt3xxn_port_ops, .mode_filter = &hpt372n_filter, }; diff -urNp linux-2.6.39.1/drivers/ata/pata_hpt3x3.c linux-2.6.39.1/drivers/ata/pata_hpt3x3.c --- linux-2.6.39.1/drivers/ata/pata_hpt3x3.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_hpt3x3.c 2011-05-22 19:36:31.000000000 -0400 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_ ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations hpt3x3_port_ops = { +static const struct ata_port_operations hpt3x3_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = hpt3x3_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_icside.c linux-2.6.39.1/drivers/ata/pata_icside.c --- linux-2.6.39.1/drivers/ata/pata_icside.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_icside.c 2011-05-22 19:36:31.000000000 -0400 @@ -320,7 +320,7 @@ static void pata_icside_postreset(struct } } -static struct ata_port_operations pata_icside_port_ops = { +static const struct ata_port_operations pata_icside_port_ops = { .inherits = &ata_bmdma_port_ops, /* no need to build any PRD tables for DMA */ .qc_prep = ata_noop_qc_prep, diff -urNp linux-2.6.39.1/drivers/ata/pata_isapnp.c linux-2.6.39.1/drivers/ata/pata_isapnp.c --- linux-2.6.39.1/drivers/ata/pata_isapnp.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_isapnp.c 2011-05-22 19:36:31.000000000 -0400 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_ ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations isapnp_port_ops = { +static const struct ata_port_operations isapnp_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, }; -static struct ata_port_operations isapnp_noalt_port_ops = { +static const struct ata_port_operations isapnp_noalt_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, /* No altstatus so we don't want to use the lost interrupt poll */ diff -urNp linux-2.6.39.1/drivers/ata/pata_it8213.c linux-2.6.39.1/drivers/ata/pata_it8213.c --- linux-2.6.39.1/drivers/ata/pata_it8213.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_it8213.c 2011-05-22 19:36:31.000000000 -0400 @@ -233,7 +233,7 @@ static struct scsi_host_template it8213_ }; -static struct ata_port_operations it8213_ops = { +static const struct ata_port_operations it8213_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = it8213_cable_detect, .set_piomode = it8213_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_it821x.c linux-2.6.39.1/drivers/ata/pata_it821x.c --- linux-2.6.39.1/drivers/ata/pata_it821x.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_it821x.c 2011-05-22 19:36:31.000000000 -0400 @@ -801,7 +801,7 @@ static struct scsi_host_template it821x_ ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations it821x_smart_port_ops = { +static const struct ata_port_operations it821x_smart_port_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma= it821x_check_atapi_dma, @@ -815,7 +815,7 @@ static struct ata_port_operations it821x .port_start = it821x_port_start, }; -static struct ata_port_operations it821x_passthru_port_ops = { +static const struct ata_port_operations it821x_passthru_port_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma= it821x_check_atapi_dma, @@ -831,7 +831,7 @@ static struct ata_port_operations it821x .port_start = 
it821x_port_start, }; -static struct ata_port_operations it821x_rdc_port_ops = { +static const struct ata_port_operations it821x_rdc_port_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma= it821x_check_atapi_dma, diff -urNp linux-2.6.39.1/drivers/ata/pata_ixp4xx_cf.c linux-2.6.39.1/drivers/ata/pata_ixp4xx_cf.c --- linux-2.6.39.1/drivers/ata/pata_ixp4xx_cf.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_ixp4xx_cf.c 2011-05-22 19:36:31.000000000 -0400 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_ ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations ixp4xx_port_ops = { +static const struct ata_port_operations ixp4xx_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ixp4xx_mmio_data_xfer, .cable_detect = ata_cable_40wire, diff -urNp linux-2.6.39.1/drivers/ata/pata_jmicron.c linux-2.6.39.1/drivers/ata/pata_jmicron.c --- linux-2.6.39.1/drivers/ata/pata_jmicron.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_jmicron.c 2011-05-22 19:36:31.000000000 -0400 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations jmicron_ops = { +static const struct ata_port_operations jmicron_ops = { .inherits = &ata_bmdma_port_ops, .prereset = jmicron_pre_reset, }; diff -urNp linux-2.6.39.1/drivers/ata/pata_legacy.c linux-2.6.39.1/drivers/ata/pata_legacy.c --- linux-2.6.39.1/drivers/ata/pata_legacy.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_legacy.c 2011-05-22 19:36:31.000000000 -0400 @@ -116,7 +116,7 @@ struct legacy_probe { struct legacy_controller { const char *name; - struct ata_port_operations *ops; + const struct ata_port_operations *ops; unsigned int pio_mask; unsigned int flags; unsigned int pflags; @@ -239,12 +239,12 @@ static const struct ata_port_operations * pio_mask as well. 
*/ -static struct ata_port_operations simple_port_ops = { +static const struct ata_port_operations simple_port_ops = { .inherits = &legacy_base_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, }; -static struct ata_port_operations legacy_port_ops = { +static const struct ata_port_operations legacy_port_ops = { .inherits = &legacy_base_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, .set_mode = legacy_set_mode, @@ -340,7 +340,7 @@ static unsigned int pdc_data_xfer_vlb(st return buflen; } -static struct ata_port_operations pdc20230_port_ops = { +static const struct ata_port_operations pdc20230_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = pdc20230_set_piomode, .sff_data_xfer = pdc_data_xfer_vlb, @@ -373,7 +373,7 @@ static void ht6560a_set_piomode(struct a ioread8(ap->ioaddr.status_addr); } -static struct ata_port_operations ht6560a_port_ops = { +static const struct ata_port_operations ht6560a_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = ht6560a_set_piomode, }; @@ -416,7 +416,7 @@ static void ht6560b_set_piomode(struct a ioread8(ap->ioaddr.status_addr); } -static struct ata_port_operations ht6560b_port_ops = { +static const struct ata_port_operations ht6560b_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = ht6560b_set_piomode, }; @@ -515,7 +515,7 @@ static void opti82c611a_set_piomode(stru } -static struct ata_port_operations opti82c611a_port_ops = { +static const struct ata_port_operations opti82c611a_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = opti82c611a_set_piomode, }; @@ -625,7 +625,7 @@ static unsigned int opti82c46x_qc_issue( return ata_sff_qc_issue(qc); } -static struct ata_port_operations opti82c46x_port_ops = { +static const struct ata_port_operations opti82c46x_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = opti82c46x_set_piomode, .qc_issue = opti82c46x_qc_issue, @@ -787,20 +787,20 @@ static int qdi_port(struct platform_devi return 0; } -static struct ata_port_operations qdi6500_port_ops = { +static const struct ata_port_operations qdi6500_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = qdi6500_set_piomode, .qc_issue = qdi_qc_issue, .sff_data_xfer = vlb32_data_xfer, }; -static struct ata_port_operations qdi6580_port_ops = { +static const struct ata_port_operations qdi6580_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = qdi6580_set_piomode, .sff_data_xfer = vlb32_data_xfer, }; -static struct ata_port_operations qdi6580dp_port_ops = { +static const struct ata_port_operations qdi6580dp_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = qdi6580dp_set_piomode, .qc_issue = qdi_qc_issue, @@ -872,7 +872,7 @@ static int winbond_port(struct platform_ return 0; } -static struct ata_port_operations winbond_port_ops = { +static const struct ata_port_operations winbond_port_ops = { .inherits = &legacy_base_port_ops, .set_piomode = winbond_set_piomode, .sff_data_xfer = vlb32_data_xfer, @@ -995,7 +995,7 @@ static __init int legacy_init_one(struct int pio_modes = controller->pio_mask; unsigned long io = probe->port; u32 mask = (1 << probe->slot); - struct ata_port_operations *ops = controller->ops; + const struct ata_port_operations *ops = controller->ops; struct legacy_data *ld = &legacy_data[probe->slot]; struct ata_host *host = NULL; struct ata_port *ap; diff -urNp linux-2.6.39.1/drivers/ata/pata_macio.c linux-2.6.39.1/drivers/ata/pata_macio.c --- linux-2.6.39.1/drivers/ata/pata_macio.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_macio.c 
2011-05-22 19:36:31.000000000 -0400 @@ -918,9 +918,8 @@ static struct scsi_host_template pata_ma .slave_configure = pata_macio_slave_config, }; -static struct ata_port_operations pata_macio_ops = { +static const struct ata_port_operations pata_macio_ops = { .inherits = &ata_bmdma_port_ops, - .freeze = pata_macio_freeze, .set_piomode = pata_macio_set_timings, .set_dmamode = pata_macio_set_timings, diff -urNp linux-2.6.39.1/drivers/ata/pata_marvell.c linux-2.6.39.1/drivers/ata/pata_marvell.c --- linux-2.6.39.1/drivers/ata/pata_marvell.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_marvell.c 2011-05-22 19:36:31.000000000 -0400 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations marvell_ops = { +static const struct ata_port_operations marvell_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = marvell_cable_detect, .prereset = marvell_pre_reset, diff -urNp linux-2.6.39.1/drivers/ata/pata_mpc52xx.c linux-2.6.39.1/drivers/ata/pata_mpc52xx.c --- linux-2.6.39.1/drivers/ata/pata_mpc52xx.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_mpc52xx.c 2011-05-22 19:36:31.000000000 -0400 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations mpc52xx_ata_port_ops = { +static const struct ata_port_operations mpc52xx_ata_port_ops = { .inherits = &ata_bmdma_port_ops, .sff_dev_select = mpc52xx_ata_dev_select, .set_piomode = mpc52xx_ata_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_mpiix.c linux-2.6.39.1/drivers/ata/pata_mpiix.c --- linux-2.6.39.1/drivers/ata/pata_mpiix.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_mpiix.c 2011-05-22 19:36:31.000000000 -0400 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations mpiix_port_ops = { +static const struct ata_port_operations mpiix_port_ops = { .inherits = &ata_sff_port_ops, .qc_issue = mpiix_qc_issue, .cable_detect = ata_cable_40wire, diff -urNp linux-2.6.39.1/drivers/ata/pata_netcell.c linux-2.6.39.1/drivers/ata/pata_netcell.c --- linux-2.6.39.1/drivers/ata/pata_netcell.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_netcell.c 2011-05-22 19:36:31.000000000 -0400 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations netcell_ops = { +static const struct ata_port_operations netcell_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_80wire, .read_id = netcell_read_id, diff -urNp linux-2.6.39.1/drivers/ata/pata_ninja32.c linux-2.6.39.1/drivers/ata/pata_ninja32.c --- linux-2.6.39.1/drivers/ata/pata_ninja32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_ninja32.c 2011-05-22 19:36:31.000000000 -0400 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32 ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations ninja32_port_ops = { +static const struct ata_port_operations ninja32_port_ops = { .inherits = &ata_bmdma_port_ops, .sff_dev_select = ninja32_dev_select, .cable_detect = ata_cable_40wire, diff -urNp linux-2.6.39.1/drivers/ata/pata_ns87410.c linux-2.6.39.1/drivers/ata/pata_ns87410.c --- linux-2.6.39.1/drivers/ata/pata_ns87410.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_ns87410.c 2011-05-22 19:36:31.000000000 -0400 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410 ATA_PIO_SHT(DRV_NAME), }; 
-static struct ata_port_operations ns87410_port_ops = { +static const struct ata_port_operations ns87410_port_ops = { .inherits = &ata_sff_port_ops, .qc_issue = ns87410_qc_issue, .cable_detect = ata_cable_40wire, diff -urNp linux-2.6.39.1/drivers/ata/pata_ns87415.c linux-2.6.39.1/drivers/ata/pata_ns87415.c --- linux-2.6.39.1/drivers/ata/pata_ns87415.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_ns87415.c 2011-05-22 19:36:31.000000000 -0400 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at } #endif /* 87560 SuperIO Support */ -static struct ata_port_operations ns87415_pata_ops = { +static const struct ata_port_operations ns87415_pata_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma = ns87415_check_atapi_dma, @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741 }; #if defined(CONFIG_SUPERIO) -static struct ata_port_operations ns87560_pata_ops = { +static const struct ata_port_operations ns87560_pata_ops = { .inherits = &ns87415_pata_ops, .sff_tf_read = ns87560_tf_read, .sff_check_status = ns87560_check_status, diff -urNp linux-2.6.39.1/drivers/ata/pata_octeon_cf.c linux-2.6.39.1/drivers/ata/pata_octeon_cf.c --- linux-2.6.39.1/drivers/ata/pata_octeon_cf.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_octeon_cf.c 2011-05-22 19:36:31.000000000 -0400 @@ -780,7 +780,7 @@ static unsigned int octeon_cf_qc_issue(s return 0; } -static struct ata_port_operations octeon_cf_ops = { +static struct ata_port_operations octeon_cf_ops = { /* cannot be const */ .inherits = &ata_sff_port_ops, .check_atapi_dma = octeon_cf_check_atapi_dma, .qc_prep = ata_noop_qc_prep, diff -urNp linux-2.6.39.1/drivers/ata/pata_oldpiix.c linux-2.6.39.1/drivers/ata/pata_oldpiix.c --- linux-2.6.39.1/drivers/ata/pata_oldpiix.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_oldpiix.c 2011-05-22 19:36:31.000000000 -0400 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations oldpiix_pata_ops = { +static const struct ata_port_operations oldpiix_pata_ops = { .inherits = &ata_bmdma_port_ops, .qc_issue = oldpiix_qc_issue, .cable_detect = ata_cable_40wire, diff -urNp linux-2.6.39.1/drivers/ata/pata_opti.c linux-2.6.39.1/drivers/ata/pata_opti.c --- linux-2.6.39.1/drivers/ata/pata_opti.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_opti.c 2011-05-22 19:36:31.000000000 -0400 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations opti_port_ops = { +static const struct ata_port_operations opti_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = opti_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_optidma.c linux-2.6.39.1/drivers/ata/pata_optidma.c --- linux-2.6.39.1/drivers/ata/pata_optidma.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_optidma.c 2011-05-22 19:36:31.000000000 -0400 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations optidma_port_ops = { +static const struct ata_port_operations optidma_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = optidma_set_pio_mode, @@ -346,7 +346,7 @@ static struct ata_port_operations optidm .prereset = optidma_pre_reset, }; -static struct ata_port_operations optiplus_port_ops = { +static const struct ata_port_operations optiplus_port_ops = { 
.inherits = &optidma_port_ops, .set_piomode = optiplus_set_pio_mode, .set_dmamode = optiplus_set_dma_mode, diff -urNp linux-2.6.39.1/drivers/ata/pata_palmld.c linux-2.6.39.1/drivers/ata/pata_palmld.c --- linux-2.6.39.1/drivers/ata/pata_palmld.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_palmld.c 2011-05-22 19:36:31.000000000 -0400 @@ -42,7 +42,7 @@ static struct scsi_host_template palmld_ ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations palmld_port_ops = { +static const struct ata_port_operations palmld_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, .cable_detect = ata_cable_40wire, diff -urNp linux-2.6.39.1/drivers/ata/pata_pcmcia.c linux-2.6.39.1/drivers/ata/pata_pcmcia.c --- linux-2.6.39.1/drivers/ata/pata_pcmcia.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_pcmcia.c 2011-05-22 19:36:31.000000000 -0400 @@ -151,14 +151,14 @@ static struct scsi_host_template pcmcia_ ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations pcmcia_port_ops = { +static const struct ata_port_operations pcmcia_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, .cable_detect = ata_cable_40wire, .set_mode = pcmcia_set_mode, }; -static struct ata_port_operations pcmcia_8bit_port_ops = { +static const struct ata_port_operations pcmcia_8bit_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_data_xfer_8bit, .cable_detect = ata_cable_40wire, @@ -205,7 +205,7 @@ static int pcmcia_init_one(struct pcmcia unsigned long io_base, ctl_base; void __iomem *io_addr, *ctl_addr; int n_ports = 1; - struct ata_port_operations *ops = &pcmcia_port_ops; + const struct ata_port_operations *ops = &pcmcia_port_ops; /* Set up attributes in order to probe card and get resources */ pdev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO | diff -urNp linux-2.6.39.1/drivers/ata/pata_pdc2027x.c linux-2.6.39.1/drivers/ata/pata_pdc2027x.c --- linux-2.6.39.1/drivers/ata/pata_pdc2027x.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_pdc2027x.c 2011-05-22 19:36:31.000000000 -0400 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027 ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations pdc2027x_pata100_ops = { +static const struct ata_port_operations pdc2027x_pata100_ops = { .inherits = &ata_bmdma_port_ops, .check_atapi_dma = pdc2027x_check_atapi_dma, .cable_detect = pdc2027x_cable_detect, .prereset = pdc2027x_prereset, }; -static struct ata_port_operations pdc2027x_pata133_ops = { +static const struct ata_port_operations pdc2027x_pata133_ops = { .inherits = &pdc2027x_pata100_ops, .mode_filter = pdc2027x_mode_filter, .set_piomode = pdc2027x_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_pdc202xx_old.c linux-2.6.39.1/drivers/ata/pata_pdc202xx_old.c --- linux-2.6.39.1/drivers/ata/pata_pdc202xx_old.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_pdc202xx_old.c 2011-05-22 19:36:31.000000000 -0400 @@ -295,7 +295,7 @@ static struct scsi_host_template pdc202x ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations pdc2024x_port_ops = { +static const struct ata_port_operations pdc2024x_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, @@ -306,7 +306,7 @@ static struct ata_port_operations pdc202 .sff_irq_check = pdc202xx_irq_check, }; -static struct ata_port_operations pdc2026x_port_ops = { +static const struct ata_port_operations pdc2026x_port_ops = { .inherits = &pdc2024x_port_ops, 
.check_atapi_dma = pdc2026x_check_atapi_dma, diff -urNp linux-2.6.39.1/drivers/ata/pata_piccolo.c linux-2.6.39.1/drivers/ata/pata_piccolo.c --- linux-2.6.39.1/drivers/ata/pata_piccolo.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_piccolo.c 2011-05-22 19:36:31.000000000 -0400 @@ -67,7 +67,7 @@ static struct scsi_host_template tosh_sh ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations tosh_port_ops = { +static const struct ata_port_operations tosh_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_unknown, .set_piomode = tosh_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_platform.c linux-2.6.39.1/drivers/ata/pata_platform.c --- linux-2.6.39.1/drivers/ata/pata_platform.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_platform.c 2011-05-22 19:36:31.000000000 -0400 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations pata_platform_port_ops = { +static const struct ata_port_operations pata_platform_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, .cable_detect = ata_cable_unknown, diff -urNp linux-2.6.39.1/drivers/ata/pata_pxa.c linux-2.6.39.1/drivers/ata/pata_pxa.c --- linux-2.6.39.1/drivers/ata/pata_pxa.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_pxa.c 2011-05-22 19:36:31.000000000 -0400 @@ -198,7 +198,7 @@ static struct scsi_host_template pxa_ata ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations pxa_ata_port_ops = { +static const struct ata_port_operations pxa_ata_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_40wire, diff -urNp linux-2.6.39.1/drivers/ata/pata_qdi.c linux-2.6.39.1/drivers/ata/pata_qdi.c --- linux-2.6.39.1/drivers/ata/pata_qdi.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_qdi.c 2011-05-22 19:36:31.000000000 -0400 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations qdi6500_port_ops = { +static const struct ata_port_operations qdi6500_port_ops = { .inherits = &ata_sff_port_ops, .qc_issue = qdi_qc_issue, .sff_data_xfer = qdi_data_xfer, @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650 .set_piomode = qdi6500_set_piomode, }; -static struct ata_port_operations qdi6580_port_ops = { +static const struct ata_port_operations qdi6580_port_ops = { .inherits = &qdi6500_port_ops, .set_piomode = qdi6580_set_piomode, }; diff -urNp linux-2.6.39.1/drivers/ata/pata_radisys.c linux-2.6.39.1/drivers/ata/pata_radisys.c --- linux-2.6.39.1/drivers/ata/pata_radisys.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_radisys.c 2011-05-22 19:36:31.000000000 -0400 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations radisys_pata_ops = { +static const struct ata_port_operations radisys_pata_ops = { .inherits = &ata_bmdma_port_ops, .qc_issue = radisys_qc_issue, .cable_detect = ata_cable_unknown, diff -urNp linux-2.6.39.1/drivers/ata/pata_rb532_cf.c linux-2.6.39.1/drivers/ata/pata_rb532_cf.c --- linux-2.6.39.1/drivers/ata/pata_rb532_cf.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_rb532_cf.c 2011-05-22 19:36:31.000000000 -0400 @@ -69,7 +69,7 @@ static irqreturn_t rb532_pata_irq_handle return IRQ_HANDLED; } -static struct ata_port_operations rb532_pata_port_ops = { +static const struct ata_port_operations 
rb532_pata_port_ops = { .inherits = &ata_sff_port_ops, .sff_data_xfer = ata_sff_data_xfer32, }; diff -urNp linux-2.6.39.1/drivers/ata/pata_rdc.c linux-2.6.39.1/drivers/ata/pata_rdc.c --- linux-2.6.39.1/drivers/ata/pata_rdc.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_rdc.c 2011-05-22 19:36:31.000000000 -0400 @@ -273,7 +273,7 @@ static void rdc_set_dmamode(struct ata_p pci_write_config_byte(dev, 0x48, udma_enable); } -static struct ata_port_operations rdc_pata_ops = { +static const struct ata_port_operations rdc_pata_ops = { .inherits = &ata_bmdma32_port_ops, .cable_detect = rdc_pata_cable_detect, .set_piomode = rdc_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_rz1000.c linux-2.6.39.1/drivers/ata/pata_rz1000.c --- linux-2.6.39.1/drivers/ata/pata_rz1000.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_rz1000.c 2011-05-22 19:36:31.000000000 -0400 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_ ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations rz1000_port_ops = { +static const struct ata_port_operations rz1000_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, .set_mode = rz1000_set_mode, diff -urNp linux-2.6.39.1/drivers/ata/pata_samsung_cf.c linux-2.6.39.1/drivers/ata/pata_samsung_cf.c --- linux-2.6.39.1/drivers/ata/pata_samsung_cf.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_samsung_cf.c 2011-05-22 19:36:31.000000000 -0400 @@ -399,7 +399,7 @@ static struct scsi_host_template pata_s3 ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations pata_s3c_port_ops = { +static const struct ata_port_operations pata_s3c_port_ops = { .inherits = &ata_sff_port_ops, .sff_check_status = pata_s3c_check_status, .sff_check_altstatus = pata_s3c_check_altstatus, @@ -413,7 +413,7 @@ static struct ata_port_operations pata_s .set_piomode = pata_s3c_set_piomode, }; -static struct ata_port_operations pata_s5p_port_ops = { +static const struct ata_port_operations pata_s5p_port_ops = { .inherits = &ata_sff_port_ops, .set_piomode = pata_s3c_set_piomode, }; diff -urNp linux-2.6.39.1/drivers/ata/pata_sc1200.c linux-2.6.39.1/drivers/ata/pata_sc1200.c --- linux-2.6.39.1/drivers/ata/pata_sc1200.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_sc1200.c 2011-05-22 19:36:31.000000000 -0400 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_ .sg_tablesize = LIBATA_DUMB_MAX_PRD, }; -static struct ata_port_operations sc1200_port_ops = { +static const struct ata_port_operations sc1200_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_prep = ata_bmdma_dumb_qc_prep, .qc_issue = sc1200_qc_issue, diff -urNp linux-2.6.39.1/drivers/ata/pata_scc.c linux-2.6.39.1/drivers/ata/pata_scc.c --- linux-2.6.39.1/drivers/ata/pata_scc.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_scc.c 2011-05-22 19:36:31.000000000 -0400 @@ -926,7 +926,7 @@ static struct scsi_host_template scc_sht ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations scc_pata_ops = { +static const struct ata_port_operations scc_pata_ops = { .inherits = &ata_bmdma_port_ops, .set_piomode = scc_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_sch.c linux-2.6.39.1/drivers/ata/pata_sch.c --- linux-2.6.39.1/drivers/ata/pata_sch.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_sch.c 2011-05-22 19:36:31.000000000 -0400 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations 
sch_pata_ops = { +static const struct ata_port_operations sch_pata_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = ata_cable_unknown, .set_piomode = sch_set_piomode, diff -urNp linux-2.6.39.1/drivers/ata/pata_serverworks.c linux-2.6.39.1/drivers/ata/pata_serverworks.c --- linux-2.6.39.1/drivers/ata/pata_serverworks.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_serverworks.c 2011-05-22 19:36:31.000000000 -0400 @@ -300,7 +300,7 @@ static struct scsi_host_template serverw ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations serverworks_osb4_port_ops = { +static const struct ata_port_operations serverworks_osb4_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = serverworks_cable_detect, .mode_filter = serverworks_osb4_filter, @@ -308,7 +308,7 @@ static struct ata_port_operations server .set_dmamode = serverworks_set_dmamode, }; -static struct ata_port_operations serverworks_csb_port_ops = { +static const struct ata_port_operations serverworks_csb_port_ops = { .inherits = &serverworks_osb4_port_ops, .mode_filter = serverworks_csb_filter, }; diff -urNp linux-2.6.39.1/drivers/ata/pata_sil680.c linux-2.6.39.1/drivers/ata/pata_sil680.c --- linux-2.6.39.1/drivers/ata/pata_sil680.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_sil680.c 2011-05-22 19:36:31.000000000 -0400 @@ -225,8 +225,7 @@ static struct scsi_host_template sil680_ ATA_BMDMA_SHT(DRV_NAME), }; - -static struct ata_port_operations sil680_port_ops = { +static const struct ata_port_operations sil680_port_ops = { .inherits = &ata_bmdma32_port_ops, .sff_exec_command = sil680_sff_exec_command, .sff_irq_check = sil680_sff_irq_check, diff -urNp linux-2.6.39.1/drivers/ata/pata_sis.c linux-2.6.39.1/drivers/ata/pata_sis.c --- linux-2.6.39.1/drivers/ata/pata_sis.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_sis.c 2011-05-22 19:36:31.000000000 -0400 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations sis_133_for_sata_ops = { +static const struct ata_port_operations sis_133_for_sata_ops = { .inherits = &ata_bmdma_port_ops, .set_piomode = sis_133_set_piomode, .set_dmamode = sis_133_set_dmamode, .cable_detect = sis_133_cable_detect, }; -static struct ata_port_operations sis_base_ops = { +static const struct ata_port_operations sis_base_ops = { .inherits = &ata_bmdma_port_ops, .prereset = sis_pre_reset, }; -static struct ata_port_operations sis_133_ops = { +static const struct ata_port_operations sis_133_ops = { .inherits = &sis_base_ops, .set_piomode = sis_133_set_piomode, .set_dmamode = sis_133_set_dmamode, .cable_detect = sis_133_cable_detect, }; -static struct ata_port_operations sis_133_early_ops = { +static const struct ata_port_operations sis_133_early_ops = { .inherits = &sis_base_ops, .set_piomode = sis_100_set_piomode, .set_dmamode = sis_133_early_set_dmamode, .cable_detect = sis_66_cable_detect, }; -static struct ata_port_operations sis_100_ops = { +static const struct ata_port_operations sis_100_ops = { .inherits = &sis_base_ops, .set_piomode = sis_100_set_piomode, .set_dmamode = sis_100_set_dmamode, .cable_detect = sis_66_cable_detect, }; -static struct ata_port_operations sis_66_ops = { +static const struct ata_port_operations sis_66_ops = { .inherits = &sis_base_ops, .set_piomode = sis_old_set_piomode, .set_dmamode = sis_66_set_dmamode, .cable_detect = sis_66_cable_detect, }; -static struct ata_port_operations sis_old_ops = { +static const struct 
ata_port_operations sis_old_ops = { .inherits = &sis_base_ops, .set_piomode = sis_old_set_piomode, .set_dmamode = sis_old_set_dmamode, diff -urNp linux-2.6.39.1/drivers/ata/pata_sl82c105.c linux-2.6.39.1/drivers/ata/pata_sl82c105.c --- linux-2.6.39.1/drivers/ata/pata_sl82c105.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_sl82c105.c 2011-05-22 19:36:31.000000000 -0400 @@ -241,7 +241,7 @@ static struct scsi_host_template sl82c10 ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations sl82c105_port_ops = { +static const struct ata_port_operations sl82c105_port_ops = { .inherits = &ata_bmdma_port_ops, .qc_defer = sl82c105_qc_defer, .bmdma_start = sl82c105_bmdma_start, diff -urNp linux-2.6.39.1/drivers/ata/pata_triflex.c linux-2.6.39.1/drivers/ata/pata_triflex.c --- linux-2.6.39.1/drivers/ata/pata_triflex.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_triflex.c 2011-05-22 19:36:31.000000000 -0400 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations triflex_port_ops = { +static const struct ata_port_operations triflex_port_ops = { .inherits = &ata_bmdma_port_ops, .bmdma_start = triflex_bmdma_start, .bmdma_stop = triflex_bmdma_stop, diff -urNp linux-2.6.39.1/drivers/ata/pata_via.c linux-2.6.39.1/drivers/ata/pata_via.c --- linux-2.6.39.1/drivers/ata/pata_via.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pata_via.c 2011-05-22 19:36:31.000000000 -0400 @@ -441,7 +441,7 @@ static struct scsi_host_template via_sht ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations via_port_ops = { +static const struct ata_port_operations via_port_ops = { .inherits = &ata_bmdma_port_ops, .cable_detect = via_cable_detect, .set_piomode = via_set_piomode, @@ -452,7 +452,7 @@ static struct ata_port_operations via_po .mode_filter = via_mode_filter, }; -static struct ata_port_operations via_port_ops_noirq = { +static const struct ata_port_operations via_port_ops_noirq = { .inherits = &via_port_ops, .sff_data_xfer = ata_sff_data_xfer_noirq, }; diff -urNp linux-2.6.39.1/drivers/ata/pdc_adma.c linux-2.6.39.1/drivers/ata/pdc_adma.c --- linux-2.6.39.1/drivers/ata/pdc_adma.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/pdc_adma.c 2011-05-22 19:36:31.000000000 -0400 @@ -146,7 +146,7 @@ static struct scsi_host_template adma_at .dma_boundary = ADMA_DMA_BOUNDARY, }; -static struct ata_port_operations adma_ata_ops = { +static const struct ata_port_operations adma_ata_ops = { .inherits = &ata_sff_port_ops, .lost_interrupt = ATA_OP_NULL, diff -urNp linux-2.6.39.1/drivers/ata/sata_dwc_460ex.c linux-2.6.39.1/drivers/ata/sata_dwc_460ex.c --- linux-2.6.39.1/drivers/ata/sata_dwc_460ex.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_dwc_460ex.c 2011-05-22 19:36:31.000000000 -0400 @@ -1598,7 +1598,7 @@ static struct scsi_host_template sata_dw .dma_boundary = ATA_DMA_BOUNDARY, }; -static struct ata_port_operations sata_dwc_ops = { +static const struct ata_port_operations sata_dwc_ops = { .inherits = &ata_sff_port_ops, .error_handler = sata_dwc_error_handler, diff -urNp linux-2.6.39.1/drivers/ata/sata_fsl.c linux-2.6.39.1/drivers/ata/sata_fsl.c --- linux-2.6.39.1/drivers/ata/sata_fsl.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_fsl.c 2011-05-22 19:36:31.000000000 -0400 @@ -1268,7 +1268,7 @@ static struct scsi_host_template sata_fs .dma_boundary = ATA_DMA_BOUNDARY, }; -static struct ata_port_operations 
sata_fsl_ops = { +static const struct ata_port_operations sata_fsl_ops = { .inherits = &sata_pmp_port_ops, .qc_defer = ata_std_qc_defer, diff -urNp linux-2.6.39.1/drivers/ata/sata_inic162x.c linux-2.6.39.1/drivers/ata/sata_inic162x.c --- linux-2.6.39.1/drivers/ata/sata_inic162x.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_inic162x.c 2011-05-22 19:36:31.000000000 -0400 @@ -705,7 +705,7 @@ static int inic_port_start(struct ata_po return 0; } -static struct ata_port_operations inic_port_ops = { +static const struct ata_port_operations inic_port_ops = { .inherits = &sata_port_ops, .check_atapi_dma = inic_check_atapi_dma, diff -urNp linux-2.6.39.1/drivers/ata/sata_mv.c linux-2.6.39.1/drivers/ata/sata_mv.c --- linux-2.6.39.1/drivers/ata/sata_mv.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_mv.c 2011-05-22 19:36:31.000000000 -0400 @@ -662,7 +662,7 @@ static struct scsi_host_template mv6_sht .dma_boundary = MV_DMA_BOUNDARY, }; -static struct ata_port_operations mv5_ops = { +static const struct ata_port_operations mv5_ops = { .inherits = &ata_sff_port_ops, .lost_interrupt = ATA_OP_NULL, @@ -682,7 +682,7 @@ static struct ata_port_operations mv5_op .port_stop = mv_port_stop, }; -static struct ata_port_operations mv6_ops = { +static const struct ata_port_operations mv6_ops = { .inherits = &ata_bmdma_port_ops, .lost_interrupt = ATA_OP_NULL, @@ -716,7 +716,7 @@ static struct ata_port_operations mv6_op .port_stop = mv_port_stop, }; -static struct ata_port_operations mv_iie_ops = { +static const struct ata_port_operations mv_iie_ops = { .inherits = &mv6_ops, .dev_config = ATA_OP_NULL, .qc_prep = mv_qc_prep_iie, diff -urNp linux-2.6.39.1/drivers/ata/sata_nv.c linux-2.6.39.1/drivers/ata/sata_nv.c --- linux-2.6.39.1/drivers/ata/sata_nv.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_nv.c 2011-05-22 19:36:31.000000000 -0400 @@ -465,7 +465,7 @@ static struct scsi_host_template nv_swnc * cases. Define nv_hardreset() which only kicks in for post-boot * probing and use it for all variants. 
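All of the ata_port_operations and pci_ops hunks in this stretch make the same change: each structure is a table of function pointers that is filled in at compile time and never written again, so marking it const lets the compiler place it in read-only memory, where a stray or attacker-controlled kernel write cannot redirect the pointers. A minimal, self-contained sketch of the idiom follows; the names are illustrative and not taken from any of the drivers being patched:

/* ops tables that are never modified after initialisation can live in
 * .rodata; the const qualifier is what moves them there. */
struct example_port_ops {
	int  (*probe)(int port);
	void (*shutdown)(int port);
};

static int example_probe(int port)     { return port >= 0 ? 0 : -1; }
static void example_shutdown(int port) { (void)port; }

static const struct example_port_ops example_ops = {
	.probe    = example_probe,
	.shutdown = example_shutdown,
};

Pointers that refer to such tables have to pick up the qualifier as well (as the matching header changes elsewhere in the patch do), otherwise the assignments stop type-checking.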
*/ -static struct ata_port_operations nv_generic_ops = { +static const struct ata_port_operations nv_generic_ops = { .inherits = &ata_bmdma_port_ops, .lost_interrupt = ATA_OP_NULL, .scr_read = nv_scr_read, @@ -473,20 +473,20 @@ static struct ata_port_operations nv_gen .hardreset = nv_hardreset, }; -static struct ata_port_operations nv_nf2_ops = { +static const struct ata_port_operations nv_nf2_ops = { .inherits = &nv_generic_ops, .freeze = nv_nf2_freeze, .thaw = nv_nf2_thaw, }; -static struct ata_port_operations nv_ck804_ops = { +static const struct ata_port_operations nv_ck804_ops = { .inherits = &nv_generic_ops, .freeze = nv_ck804_freeze, .thaw = nv_ck804_thaw, .host_stop = nv_ck804_host_stop, }; -static struct ata_port_operations nv_adma_ops = { +static const struct ata_port_operations nv_adma_ops = { .inherits = &nv_ck804_ops, .check_atapi_dma = nv_adma_check_atapi_dma, @@ -510,7 +510,7 @@ static struct ata_port_operations nv_adm .host_stop = nv_adma_host_stop, }; -static struct ata_port_operations nv_swncq_ops = { +static const struct ata_port_operations nv_swncq_ops = { .inherits = &nv_generic_ops, .qc_defer = ata_std_qc_defer, diff -urNp linux-2.6.39.1/drivers/ata/sata_promise.c linux-2.6.39.1/drivers/ata/sata_promise.c --- linux-2.6.39.1/drivers/ata/sata_promise.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_promise.c 2011-05-22 19:36:31.000000000 -0400 @@ -194,7 +194,7 @@ static const struct ata_port_operations .error_handler = pdc_error_handler, }; -static struct ata_port_operations pdc_sata_ops = { +static const struct ata_port_operations pdc_sata_ops = { .inherits = &pdc_common_ops, .cable_detect = pdc_sata_cable_detect, .freeze = pdc_sata_freeze, @@ -207,14 +207,14 @@ static struct ata_port_operations pdc_sa /* First-generation chips need a more restrictive ->check_atapi_dma op, and ->freeze/thaw that ignore the hotplug controls. 
*/ -static struct ata_port_operations pdc_old_sata_ops = { +static const struct ata_port_operations pdc_old_sata_ops = { .inherits = &pdc_sata_ops, .freeze = pdc_freeze, .thaw = pdc_thaw, .check_atapi_dma = pdc_old_sata_check_atapi_dma, }; -static struct ata_port_operations pdc_pata_ops = { +static const struct ata_port_operations pdc_pata_ops = { .inherits = &pdc_common_ops, .cable_detect = pdc_pata_cable_detect, .freeze = pdc_freeze, diff -urNp linux-2.6.39.1/drivers/ata/sata_qstor.c linux-2.6.39.1/drivers/ata/sata_qstor.c --- linux-2.6.39.1/drivers/ata/sata_qstor.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_qstor.c 2011-05-22 19:36:31.000000000 -0400 @@ -131,7 +131,7 @@ static struct scsi_host_template qs_ata_ .dma_boundary = QS_DMA_BOUNDARY, }; -static struct ata_port_operations qs_ata_ops = { +static const struct ata_port_operations qs_ata_ops = { .inherits = &ata_sff_port_ops, .check_atapi_dma = qs_check_atapi_dma, diff -urNp linux-2.6.39.1/drivers/ata/sata_sil24.c linux-2.6.39.1/drivers/ata/sata_sil24.c --- linux-2.6.39.1/drivers/ata/sata_sil24.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_sil24.c 2011-05-22 19:36:31.000000000 -0400 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s .dma_boundary = ATA_DMA_BOUNDARY, }; -static struct ata_port_operations sil24_ops = { +static const struct ata_port_operations sil24_ops = { .inherits = &sata_pmp_port_ops, .qc_defer = sil24_qc_defer, diff -urNp linux-2.6.39.1/drivers/ata/sata_sil.c linux-2.6.39.1/drivers/ata/sata_sil.c --- linux-2.6.39.1/drivers/ata/sata_sil.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_sil.c 2011-05-22 19:36:31.000000000 -0400 @@ -181,7 +181,7 @@ static struct scsi_host_template sil_sht .sg_tablesize = ATA_MAX_PRD }; -static struct ata_port_operations sil_ops = { +static const struct ata_port_operations sil_ops = { .inherits = &ata_bmdma32_port_ops, .dev_config = sil_dev_config, .set_mode = sil_set_mode, diff -urNp linux-2.6.39.1/drivers/ata/sata_sis.c linux-2.6.39.1/drivers/ata/sata_sis.c --- linux-2.6.39.1/drivers/ata/sata_sis.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_sis.c 2011-05-22 19:36:31.000000000 -0400 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations sis_ops = { +static const struct ata_port_operations sis_ops = { .inherits = &ata_bmdma_port_ops, .scr_read = sis_scr_read, .scr_write = sis_scr_write, diff -urNp linux-2.6.39.1/drivers/ata/sata_svw.c linux-2.6.39.1/drivers/ata/sata_svw.c --- linux-2.6.39.1/drivers/ata/sata_svw.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_svw.c 2011-05-22 19:36:31.000000000 -0400 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata }; -static struct ata_port_operations k2_sata_ops = { +static const struct ata_port_operations k2_sata_ops = { .inherits = &ata_bmdma_port_ops, .sff_tf_load = k2_sata_tf_load, .sff_tf_read = k2_sata_tf_read, diff -urNp linux-2.6.39.1/drivers/ata/sata_sx4.c linux-2.6.39.1/drivers/ata/sata_sx4.c --- linux-2.6.39.1/drivers/ata/sata_sx4.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_sx4.c 2011-05-22 19:36:31.000000000 -0400 @@ -249,7 +249,7 @@ static struct scsi_host_template pdc_sat }; /* TODO: inherit from base port_ops after converting to new EH */ -static struct ata_port_operations pdc_20621_ops = { +static const struct ata_port_operations pdc_20621_ops = { .inherits = &ata_sff_port_ops, 
.check_atapi_dma = pdc_check_atapi_dma, diff -urNp linux-2.6.39.1/drivers/ata/sata_uli.c linux-2.6.39.1/drivers/ata/sata_uli.c --- linux-2.6.39.1/drivers/ata/sata_uli.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_uli.c 2011-05-22 19:36:31.000000000 -0400 @@ -80,7 +80,7 @@ static struct scsi_host_template uli_sht ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations uli_ops = { +static const struct ata_port_operations uli_ops = { .inherits = &ata_bmdma_port_ops, .scr_read = uli_scr_read, .scr_write = uli_scr_write, diff -urNp linux-2.6.39.1/drivers/ata/sata_via.c linux-2.6.39.1/drivers/ata/sata_via.c --- linux-2.6.39.1/drivers/ata/sata_via.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_via.c 2011-05-22 19:36:31.000000000 -0400 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh ATA_BMDMA_SHT(DRV_NAME), }; -static struct ata_port_operations svia_base_ops = { +static const struct ata_port_operations svia_base_ops = { .inherits = &ata_bmdma_port_ops, .sff_tf_load = svia_tf_load, }; -static struct ata_port_operations vt6420_sata_ops = { +static const struct ata_port_operations vt6420_sata_ops = { .inherits = &svia_base_ops, .freeze = svia_noop_freeze, .prereset = vt6420_prereset, .bmdma_start = vt6420_bmdma_start, }; -static struct ata_port_operations vt6421_pata_ops = { +static const struct ata_port_operations vt6421_pata_ops = { .inherits = &svia_base_ops, .cable_detect = vt6421_pata_cable_detect, .set_piomode = vt6421_set_pio_mode, .set_dmamode = vt6421_set_dma_mode, }; -static struct ata_port_operations vt6421_sata_ops = { +static const struct ata_port_operations vt6421_sata_ops = { .inherits = &svia_base_ops, .scr_read = svia_scr_read, .scr_write = svia_scr_write, }; -static struct ata_port_operations vt8251_ops = { +static const struct ata_port_operations vt8251_ops = { .inherits = &svia_base_ops, .hardreset = sata_std_hardreset, .scr_read = vt8251_scr_read, diff -urNp linux-2.6.39.1/drivers/ata/sata_vsc.c linux-2.6.39.1/drivers/ata/sata_vsc.c --- linux-2.6.39.1/drivers/ata/sata_vsc.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ata/sata_vsc.c 2011-05-22 19:36:31.000000000 -0400 @@ -300,7 +300,7 @@ static struct scsi_host_template vsc_sat }; -static struct ata_port_operations vsc_sata_ops = { +static const struct ata_port_operations vsc_sata_ops = { .inherits = &ata_bmdma_port_ops, /* The IRQ handling is not quite standard SFF behaviour so we cannot use the default lost interrupt handler */ diff -urNp linux-2.6.39.1/drivers/atm/adummy.c linux-2.6.39.1/drivers/atm/adummy.c --- linux-2.6.39.1/drivers/atm/adummy.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/adummy.c 2011-05-22 19:36:31.000000000 -0400 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); return 0; } diff -urNp linux-2.6.39.1/drivers/atm/ambassador.c linux-2.6.39.1/drivers/atm/ambassador.c --- linux-2.6.39.1/drivers/atm/ambassador.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/ambassador.c 2011-05-22 19:36:31.000000000 -0400 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx); // VC layer stats - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); // free the descriptor kfree (tx_descr); @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, dump_skb 
("<<<", vc, skb); // VC layer stats - atomic_inc(&atm_vcc->stats->rx); + atomic_inc_unchecked(&atm_vcc->stats->rx); __net_timestamp(skb); // end of our responsibility atm_vcc->push (atm_vcc, skb); @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, } else { PRINTK (KERN_INFO, "dropped over-size frame"); // should we count this? - atomic_inc(&atm_vcc->stats->rx_drop); + atomic_inc_unchecked(&atm_vcc->stats->rx_drop); } } else { @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at } if (check_area (skb->data, skb->len)) { - atomic_inc(&atm_vcc->stats->tx_err); + atomic_inc_unchecked(&atm_vcc->stats->tx_err); return -ENOMEM; // ? } diff -urNp linux-2.6.39.1/drivers/atm/atmtcp.c linux-2.6.39.1/drivers/atm/atmtcp.c --- linux-2.6.39.1/drivers/atm/atmtcp.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/atmtcp.c 2011-05-22 19:36:31.000000000 -0400 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); if (dev_data) return 0; - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -ENOLINK; } size = skb->len+sizeof(struct atmtcp_hdr); @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc if (!new_skb) { if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -ENOBUFS; } hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr)); @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); out_vcc->push(out_vcc,new_skb); - atomic_inc(&vcc->stats->tx); - atomic_inc(&out_vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->tx); + atomic_inc_unchecked(&out_vcc->stats->rx); return 0; } @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci)); read_unlock(&vcc_sklist_lock); if (!out_vcc) { - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); goto done; } skb_pull(skb,sizeof(struct atmtcp_hdr)); @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc __net_timestamp(new_skb); skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len); out_vcc->push(out_vcc,new_skb); - atomic_inc(&vcc->stats->tx); - atomic_inc(&out_vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->tx); + atomic_inc_unchecked(&out_vcc->stats->rx); done: if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb(skb); diff -urNp linux-2.6.39.1/drivers/atm/eni.c linux-2.6.39.1/drivers/atm/eni.c --- linux-2.6.39.1/drivers/atm/eni.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/eni.c 2011-05-22 19:36:31.000000000 -0400 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc) DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n", vcc->dev->number); length = 0; - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } else { length = ATM_CELL_SIZE-1; /* no HEC */ @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc) size); } eff = length = 0; - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } else { size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2); @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc) "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n", vcc->dev->number,vcc->vci,length,size << 2,descr); length = eff = 0; - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } } skb = eff ? 
atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL; @@ -771,7 +771,7 @@ rx_dequeued++; vcc->push(vcc,skb); pushed++; } - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } wake_up(&eni_dev->rx_wait); } @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d PCI_DMA_TODEVICE); if (vcc->pop) vcc->pop(vcc,skb); else dev_kfree_skb_irq(skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); wake_up(&eni_dev->tx_wait); dma_complete++; } diff -urNp linux-2.6.39.1/drivers/atm/firestream.c linux-2.6.39.1/drivers/atm/firestream.c --- linux-2.6.39.1/drivers/atm/firestream.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/firestream.c 2011-05-22 19:36:31.000000000 -0400 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct } } - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); fs_dprintk (FS_DEBUG_TXMEM, "i"); fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb); @@ -816,7 +816,7 @@ static void process_incoming (struct fs_ #endif skb_put (skb, qe->p1 & 0xffff); ATM_SKB(skb)->vcc = atm_vcc; - atomic_inc(&atm_vcc->stats->rx); + atomic_inc_unchecked(&atm_vcc->stats->rx); __net_timestamp(skb); fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb); atm_vcc->push (atm_vcc, skb); @@ -837,12 +837,12 @@ static void process_incoming (struct fs_ kfree (pe); } if (atm_vcc) - atomic_inc(&atm_vcc->stats->rx_drop); + atomic_inc_unchecked(&atm_vcc->stats->rx_drop); break; case 0x1f: /* Reassembly abort: no buffers. */ /* Silently increment error counter. */ if (atm_vcc) - atomic_inc(&atm_vcc->stats->rx_drop); + atomic_inc_unchecked(&atm_vcc->stats->rx_drop); break; default: /* Hmm. Haven't written the code to handle the others yet... -- REW */ printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", diff -urNp linux-2.6.39.1/drivers/atm/fore200e.c linux-2.6.39.1/drivers/atm/fore200e.c --- linux-2.6.39.1/drivers/atm/fore200e.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/fore200e.c 2011-05-22 19:36:31.000000000 -0400 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200 #endif /* check error condition */ if (*entry->status & STATUS_ERROR) - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); else - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); } } @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2 if (skb == NULL) { DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); return -ENOMEM; } @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2 dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); return -ENOMEM; } ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200 DPRINTK(2, "damaged PDU on %d.%d.%d\n", fore200e->atm_dev->number, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); } } @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc goto retry_here; } - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); fore200e->tx_sat++; DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is 
%08x\n", diff -urNp linux-2.6.39.1/drivers/atm/he.c linux-2.6.39.1/drivers/atm/he.c --- linux-2.6.39.1/drivers/atm/he.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/he.c 2011-05-22 19:36:31.000000000 -0400 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { hprintk("HBUF_ERR! (cid 0x%x)\n", cid); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); goto return_host_buffers; } @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i RBRQ_LEN_ERR(he_dev->rbrq_head) ? "LEN_ERR" : "", vcc->vpi, vcc->vci); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto return_host_buffers; } @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i vcc->push(vcc, skb); spin_lock(&he_dev->global_lock); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); return_host_buffers: ++pdus_assembled; @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str tpd->vcc->pop(tpd->vcc, tpd->skb); else dev_kfree_skb_any(tpd->skb); - atomic_inc(&tpd->vcc->stats->tx_err); + atomic_inc_unchecked(&tpd->vcc->stats->tx_err); } pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); return; @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -EINVAL; } @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -EINVAL; } #endif @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); spin_unlock_irqrestore(&he_dev->global_lock, flags); return -ENOMEM; } @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); spin_unlock_irqrestore(&he_dev->global_lock, flags); return -ENOMEM; } @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b __enqueue_tpd(he_dev, tpd, cid); spin_unlock_irqrestore(&he_dev->global_lock, flags); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); return 0; } diff -urNp linux-2.6.39.1/drivers/atm/horizon.c linux-2.6.39.1/drivers/atm/horizon.c --- linux-2.6.39.1/drivers/atm/horizon.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/horizon.c 2011-05-22 19:36:31.000000000 -0400 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, { struct atm_vcc * vcc = ATM_SKB(skb)->vcc; // VC layer stats - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); __net_timestamp(skb); // end of our responsibility vcc->push (vcc, skb); @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev->tx_iovec = NULL; // VC layer stats - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); // free the skb hrz_kfree_skb (skb); diff -urNp linux-2.6.39.1/drivers/atm/idt77252.c linux-2.6.39.1/drivers/atm/idt77252.c --- linux-2.6.39.1/drivers/atm/idt77252.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/idt77252.c 2011-05-22 19:36:31.000000000 -0400 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str else dev_kfree_skb(skb); - atomic_inc(&vcc->stats->tx); + 
atomic_inc_unchecked(&vcc->stats->tx); } atomic_dec(&scq->used); @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st if ((sb = dev_alloc_skb(64)) == NULL) { printk("%s: Can't allocate buffers for aal0.\n", card->name); - atomic_add(i, &vcc->stats->rx_drop); + atomic_add_unchecked(i, &vcc->stats->rx_drop); break; } if (!atm_charge(vcc, sb->truesize)) { RXPRINTK("%s: atm_charge() dropped aal0 packets.\n", card->name); - atomic_add(i - 1, &vcc->stats->rx_drop); + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); dev_kfree_skb(sb); break; } @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); cell += ATM_CELL_PAYLOAD; } @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st "(CDC: %08x)\n", card->name, len, rpp->len, readl(SAR_REG_CDC)); recycle_rx_pool_skb(card, rpp); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } if (stat & SAR_RSQE_CRC) { RXPRINTK("%s: AAL5 CRC error.\n", card->name); recycle_rx_pool_skb(card, rpp); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } if (skb_queue_len(&rpp->queue) > 1) { @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st RXPRINTK("%s: Can't alloc RX skb.\n", card->name); recycle_rx_pool_skb(card, rpp); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); return; } if (!atm_charge(vcc, skb->truesize)) { @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); return; } @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); if (skb->truesize > SAR_FB_SIZE_3) add_rx_skb(card, 3, SAR_FB_SIZE_3, 1); @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car if (vcc->qos.aal != ATM_AAL0) { RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n", card->name, vpi, vci); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); goto drop; } if ((sb = dev_alloc_skb(64)) == NULL) { printk("%s: Can't allocate buffers for AAL0.\n", card->name); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto drop; } @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); drop: skb_pull(queue, 64); @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s if (vc == NULL) { printk("%s: NULL connection in send().\n", card->name); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } if (!test_bit(VCF_TX, &vc->flags)) { printk("%s: Trying to transmit on a non-tx VC.\n", card->name); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s break; default: printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } if (skb_shinfo(skb)->nr_frags != 0) { printk("%s: No scatter-gather yet.\n", card->name); - atomic_inc(&vcc->stats->tx_err); + 
atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return -EINVAL; } @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s err = queue_skb(card, vc, skb, oam); if (err) { - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb(skb); return err; } @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v skb = dev_alloc_skb(64); if (!skb) { printk("%s: Out of memory in send_oam().\n", card->name); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); return -ENOMEM; } atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); diff -urNp linux-2.6.39.1/drivers/atm/iphase.c linux-2.6.39.1/drivers/atm/iphase.c --- linux-2.6.39.1/drivers/atm/iphase.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/iphase.c 2011-05-22 19:36:31.000000000 -0400 @@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev) status = (u_short) (buf_desc_ptr->desc_mode); if (status & (RX_CER | RX_PTE | RX_OFL)) { - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); IF_ERR(printk("IA: bad packet, dropping it");) if (status & RX_CER) { IF_ERR(printk(" cause: packet CRC error\n");) @@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev) len = dma_addr - buf_addr; if (len > iadev->rx_buf_sz) { printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); goto out_free_desc; } @@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev * ia_vcc = INPH_IA_VCC(vcc); if (ia_vcc == NULL) { - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); dev_kfree_skb_any(skb); atm_return(vcc, atm_guess_pdu2truesize(len)); goto INCR_DLE; @@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev * if ((length > iadev->rx_buf_sz) || (length > (skb->len - sizeof(struct cpcs_trailer)))) { - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)", length, skb->len);) dev_kfree_skb_any(skb); @@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev * IF_RX(printk("rx_dle_intr: skb push");) vcc->push(vcc,skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); iadev->rx_pkt_cnt++; } INCR_DLE: @@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev, { struct k_sonet_stats *stats; stats = &PRIV(_ia_dev[board])->sonet_stats; - printk("section_bip: %d\n", atomic_read(&stats->section_bip)); - printk("line_bip : %d\n", atomic_read(&stats->line_bip)); - printk("path_bip : %d\n", atomic_read(&stats->path_bip)); - printk("line_febe : %d\n", atomic_read(&stats->line_febe)); - printk("path_febe : %d\n", atomic_read(&stats->path_febe)); - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs)); - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs)); - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells)); - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells)); + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip)); + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip)); + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip)); + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe)); + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe)); + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs)); + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs)); + printk("tx_cells : 
%d\n", atomic_read_unchecked(&stats->tx_cells)); + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells)); } ia_cmds.status = 0; break; @@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vc if ((desc == 0) || (desc > iadev->num_tx_desc)) { IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); if (vcc->pop) vcc->pop(vcc, skb); else @@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vc ATM_DESC(skb) = vcc->vci; skb_queue_tail(&iadev->tx_dma_q, skb); - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); iadev->tx_pkt_cnt++; /* Increment transaction counter */ writel(2, iadev->dma+IPHASE5575_TX_COUNTER); #if 0 /* add flow control logic */ - if (atomic_read(&vcc->stats->tx) % 20 == 0) { + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) { if (iavcc->vc_desc_cnt > 10) { vcc->tx_quota = vcc->tx_quota * 3 / 4; printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); diff -urNp linux-2.6.39.1/drivers/atm/lanai.c linux-2.6.39.1/drivers/atm/lanai.c --- linux-2.6.39.1/drivers/atm/lanai.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/lanai.c 2011-05-22 19:36:31.000000000 -0400 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0); lanai_endtx(lanai, lvcc); lanai_free_skb(lvcc->tx.atmvcc, skb); - atomic_inc(&lvcc->tx.atmvcc->stats->tx); + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx); } /* Try to fill the buffer - don't call unless there is backlog */ @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc ATM_SKB(skb)->vcc = lvcc->rx.atmvcc; __net_timestamp(skb); lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb); - atomic_inc(&lvcc->rx.atmvcc->stats->rx); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx); out: lvcc->rx.buf.ptr = end; cardvcc_write(lvcc, endptr, vcc_rxreadptr); @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 " "vcc %d\n", lanai->number, (unsigned int) s, vci); lanai->stats.service_rxnotaal5++; - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); return 0; } if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) { @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d int bytes; read_unlock(&vcc_sklist_lock); DPRINTK("got trashed rx pdu on vci %d\n", vci); - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); lvcc->stats.x.aal5.service_trash++; bytes = (SERVICE_GET_END(s) * 16) - (((unsigned long) lvcc->rx.buf.ptr) - @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d } if (s & SERVICE_STREAM) { read_unlock(&vcc_sklist_lock); - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); lvcc->stats.x.aal5.service_stream++; printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream " "PDU on VCI %d!\n", lanai->number, vci); @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d return 0; } DPRINTK("got rx crc error on vci %d\n", vci); - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); lvcc->stats.x.aal5.service_rxcrc++; lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4]; cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr); diff -urNp linux-2.6.39.1/drivers/atm/nicstar.c linux-2.6.39.1/drivers/atm/nicstar.c --- linux-2.6.39.1/drivers/atm/nicstar.c 2011-05-19 
00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/nicstar.c 2011-05-22 19:36:31.000000000 -0400 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, if ((vc = (vc_map *) vcc->dev_data) == NULL) { printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, if (!vc->tx) { printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } if (skb_shinfo(skb)->nr_frags != 0) { printk("nicstar%d: No scatter-gather yet.\n", card->index); - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, } if (push_scqe(card, vc, scq, &scqe, skb) != 0) { - atomic_inc(&vcc->stats->tx_err); + atomic_inc_unchecked(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EIO; } - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); return 0; } @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns printk ("nicstar%d: Can't allocate buffers for aal0.\n", card->index); - atomic_add(i, &vcc->stats->rx_drop); + atomic_add_unchecked(i, &vcc->stats->rx_drop); break; } if (!atm_charge(vcc, sb->truesize)) { RXPRINTK ("nicstar%d: atm_charge() dropped aal0 packets.\n", card->index); - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ dev_kfree_skb_any(sb); break; } @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); cell += ATM_CELL_PAYLOAD; } @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns if (iovb == NULL) { printk("nicstar%d: Out of iovec buffers.\n", card->index); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); recycle_rx_buf(card, skb); return; } @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns small or large buffer itself. 
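The ATM driver hunks in this stretch (adummy, ambassador, atmtcp, eni, firestream, fore200e, he, horizon, idt77252, iphase, lanai, nicstar and the rest below) all convert the vcc->stats and SONET statistics updates from atomic_inc/atomic_add/atomic_read to the _unchecked variants. These fields are pure event counters whose wraparound is harmless, so they are moved to the unchecked counter type to exempt them from the patch's reference-count overflow detection; only counters whose overflow would be a genuine bug stay on the checked atomic_t. The helpers themselves are defined outside this section; the sketch below is only a rough illustration of such an opt-out type, written with GCC builtins rather than the kernel's arch-specific atomics:

/* Sketch only: a counter type whose updates are deliberately left
 * uninstrumented, for statistics that are allowed to wrap. */
typedef struct {
	int counter;
} atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);	/* plain wrapping increment */
}

static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, i);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return *(volatile const int *)&v->counter;
}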
*/ } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) { printk("nicstar%d: received too big AAL5 SDU.\n", card->index); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_MAX_IOVECS); NS_PRV_IOVCNT(iovb) = 0; @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns ("nicstar%d: Expected a small buffer, and this is not one.\n", card->index); which_list(card, skb); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_rx_buf(card, skb); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns ("nicstar%d: Expected a large buffer, and this is not one.\n", card->index); which_list(card, skb); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_PRV_IOVCNT(iovb)); vc->rx_iov = NULL; @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns printk(" - PDU size mismatch.\n"); else printk(".\n"); - atomic_inc(&vcc->stats->rx_err); + atomic_inc_unchecked(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, NS_PRV_IOVCNT(iovb)); vc->rx_iov = NULL; @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns /* skb points to a small buffer */ if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { skb_put(skb, len); dequeue_sm_buf(card, skb); @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */ struct sk_buff *sb; @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns if (len <= NS_SMBUFSIZE) { if (!atm_charge(vcc, sb->truesize)) { push_rxbufs(card, sb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { skb_put(sb, len); dequeue_sm_buf(card, sb); @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } push_rxbufs(card, skb); @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { dequeue_lg_buf(card, skb); #ifdef NS_USE_DESTRUCTORS @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } push_rxbufs(card, sb); @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns printk ("nicstar%d: Out of huge buffers.\n", card->index); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns card->hbpool.count++; } else dev_kfree_skb_any(hb); - atomic_inc(&vcc->stats->rx_drop); + atomic_inc_unchecked(&vcc->stats->rx_drop); } else { /* Copy the small buffer to the huge buffer */ sb = (struct sk_buff *)iov->iov_base; @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns #endif /* NS_USE_DESTRUCTORS */ __net_timestamp(hb); vcc->push(vcc, hb); - 
atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } } diff -urNp linux-2.6.39.1/drivers/atm/solos-pci.c linux-2.6.39.1/drivers/atm/solos-pci.c --- linux-2.6.39.1/drivers/atm/solos-pci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/solos-pci.c 2011-05-22 19:36:31.000000000 -0400 @@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg) } atm_charge(vcc, skb->truesize); vcc->push(vcc, skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); break; case PKT_STATUS: @@ -900,6 +900,8 @@ static int print_buffer(struct sk_buff * char msg[500]; char item[10]; + pax_track_stack(); + len = buf->len; for (i = 0; i < len; i++){ if(i % 8 == 0) @@ -1009,7 +1011,7 @@ static uint32_t fpga_tx(struct solos_car vcc = SKB_CB(oldskb)->vcc; if (vcc) { - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); solos_pop(vcc, oldskb); } else dev_kfree_skb_irq(oldskb); diff -urNp linux-2.6.39.1/drivers/atm/suni.c linux-2.6.39.1/drivers/atm/suni.c --- linux-2.6.39.1/drivers/atm/suni.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/suni.c 2011-05-22 19:36:31.000000000 -0400 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock); #define ADD_LIMITED(s,v) \ - atomic_add((v),&stats->s); \ - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX); + atomic_add_unchecked((v),&stats->s); \ + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX); static void suni_hz(unsigned long from_timer) diff -urNp linux-2.6.39.1/drivers/atm/uPD98402.c linux-2.6.39.1/drivers/atm/uPD98402.c --- linux-2.6.39.1/drivers/atm/uPD98402.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/uPD98402.c 2011-05-22 19:36:31.000000000 -0400 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d struct sonet_stats tmp; int error = 0; - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp); if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp)); if (zero && !error) { @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev #define ADD_LIMITED(s,v) \ - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \ - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \ - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); } + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \ + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \ + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); } static void stat_event(struct atm_dev *dev) @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev if (reason & uPD98402_INT_PFM) stat_event(dev); if (reason & uPD98402_INT_PCO) { (void) GET(PCOCR); /* clear interrupt cause */ - atomic_add(GET(HECCT), + atomic_add_unchecked(GET(HECCT), &PRIV(dev)->sonet_stats.uncorr_hcs); } if ((reason & uPD98402_INT_RFO) && @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO | uPD98402_INT_LOS),PIMR); /* enable them */ (void) fetch_stats(dev,NULL,1); /* clear kernel counters */ - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1); - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1); - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1); + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1); + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1); + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1); return 0; } diff -urNp linux-2.6.39.1/drivers/atm/zatm.c linux-2.6.39.1/drivers/atm/zatm.c 
--- linux-2.6.39.1/drivers/atm/zatm.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/atm/zatm.c 2011-05-22 19:36:31.000000000 -0400 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy } if (!size) { dev_kfree_skb_irq(skb); - if (vcc) atomic_inc(&vcc->stats->rx_err); + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err); continue; } if (!atm_charge(vcc,skb->truesize)) { @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy skb->len = size; ATM_SKB(skb)->vcc = vcc; vcc->push(vcc,skb); - atomic_inc(&vcc->stats->rx); + atomic_inc_unchecked(&vcc->stats->rx); } zout(pos & 0xffff,MTA(mbx)); #if 0 /* probably a stupid idea */ @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD skb_queue_head(&zatm_vcc->backlog,skb); break; } - atomic_inc(&vcc->stats->tx); + atomic_inc_unchecked(&vcc->stats->tx); wake_up(&zatm_vcc->tx_wait); } diff -urNp linux-2.6.39.1/drivers/base/iommu.c linux-2.6.39.1/drivers/base/iommu.c --- linux-2.6.39.1/drivers/base/iommu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/base/iommu.c 2011-05-22 19:36:31.000000000 -0400 @@ -23,9 +23,8 @@ #include #include -static struct iommu_ops *iommu_ops; - -void register_iommu(struct iommu_ops *ops) +static const struct iommu_ops *iommu_ops; +void register_iommu(const struct iommu_ops *ops) { if (iommu_ops) BUG(); diff -urNp linux-2.6.39.1/drivers/base/power/generic_ops.c linux-2.6.39.1/drivers/base/power/generic_ops.c --- linux-2.6.39.1/drivers/base/power/generic_ops.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/base/power/generic_ops.c 2011-05-22 19:36:31.000000000 -0400 @@ -215,7 +215,7 @@ int pm_generic_restore(struct device *de EXPORT_SYMBOL_GPL(pm_generic_restore); #endif /* CONFIG_PM_SLEEP */ -struct dev_pm_ops generic_subsys_pm_ops = { +const struct dev_pm_ops generic_subsys_pm_ops = { #ifdef CONFIG_PM_SLEEP .suspend = pm_generic_suspend, .resume = pm_generic_resume, diff -urNp linux-2.6.39.1/drivers/base/power/wakeup.c linux-2.6.39.1/drivers/base/power/wakeup.c --- linux-2.6.39.1/drivers/base/power/wakeup.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/base/power/wakeup.c 2011-05-22 19:36:31.000000000 -0400 @@ -29,14 +29,14 @@ bool events_check_enabled; * They need to be modified together atomically, so it's better to use one * atomic variable to hold them both. */ -static atomic_t combined_event_count = ATOMIC_INIT(0); +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0); #define IN_PROGRESS_BITS (sizeof(int) * 4) #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1) static void split_counters(unsigned int *cnt, unsigned int *inpr) { - unsigned int comb = atomic_read(&combined_event_count); + unsigned int comb = atomic_read_unchecked(&combined_event_count); *cnt = (comb >> IN_PROGRESS_BITS); *inpr = comb & MAX_IN_PROGRESS; @@ -351,7 +351,7 @@ static void wakeup_source_activate(struc ws->last_time = ktime_get(); /* Increment the counter of events in progress. */ - atomic_inc(&combined_event_count); + atomic_inc_unchecked(&combined_event_count); } /** @@ -441,7 +441,7 @@ static void wakeup_source_deactivate(str * Increment the counter of registered wakeup events and decrement the * couter of wakeup events in progress simultaneously. 
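The wakeup.c change just below is the same counter exemption applied to combined_event_count, which packs two counters (total registered wakeup events and events in progress) into one word and relies on modular arithmetic when the high half wraps; treating it as an overflow-checked reference count would raise false positives. A small sketch of the packing that split_counters() in the context lines undoes; the constants mirror those lines, while the pack helper itself is hypothetical:

#define IN_PROGRESS_BITS	(sizeof(int) * 4)	/* low half: events in progress */
#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)

/* Hypothetical inverse of split_counters(): both halves in one word. */
static unsigned int pack_counters(unsigned int cnt, unsigned int inpr)
{
	return (cnt << IN_PROGRESS_BITS) | (inpr & MAX_IN_PROGRESS);
}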
*/ - atomic_add(MAX_IN_PROGRESS, &combined_event_count); + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count); } /** diff -urNp linux-2.6.39.1/drivers/block/cciss.c linux-2.6.39.1/drivers/block/cciss.c --- linux-2.6.39.1/drivers/block/cciss.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/block/cciss.c 2011-05-22 19:41:32.000000000 -0400 @@ -103,7 +103,7 @@ MODULE_DEVICE_TABLE(pci, cciss_pci_devic * product = Marketing Name for the board * access = Address of the struct of function pointers */ -static struct board_type products[] = { +static const struct board_type products[] = { {0x40700E11, "Smart Array 5300", &SA5_access}, {0x40800E11, "Smart Array 5i", &SA5B_access}, {0x40820E11, "Smart Array 532", &SA5B_access}, @@ -1151,6 +1151,8 @@ static int cciss_ioctl32_passthru(struct int err; u32 cp; + memset(&arg64, 0, sizeof(arg64)); + err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, diff -urNp linux-2.6.39.1/drivers/block/cciss.h linux-2.6.39.1/drivers/block/cciss.h --- linux-2.6.39.1/drivers/block/cciss.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/block/cciss.h 2011-05-22 19:36:31.000000000 -0400 @@ -393,7 +393,7 @@ static bool SA5_performant_intr_pending( return register_value & SA5_OUTDB_STATUS_PERF_BIT; } -static struct access_method SA5_access = { +static const struct access_method SA5_access = { SA5_submit_command, SA5_intr_mask, SA5_fifo_full, @@ -401,7 +401,7 @@ static struct access_method SA5_access = SA5_completed, }; -static struct access_method SA5B_access = { +static const struct access_method SA5B_access = { SA5_submit_command, SA5B_intr_mask, SA5_fifo_full, @@ -409,7 +409,7 @@ static struct access_method SA5B_access SA5_completed, }; -static struct access_method SA5_performant_access = { +static const struct access_method SA5_performant_access = { SA5_submit_command, SA5_performant_intr_mask, SA5_fifo_full, @@ -420,7 +420,7 @@ static struct access_method SA5_performa struct board_type { __u32 board_id; char *product_name; - struct access_method *access; + const struct access_method *access; int nr_cmds; /* Max cmds this kind of ctlr can handle. 
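The cciss_ioctl32_passthru() hunk above adds a memset() of the on-stack arg64 before its fields are filled from the 32-bit compat arguments. Without it, structure padding and any members the compat path does not copy explicitly remain uninitialised kernel stack data and can later be handed back to user space. The same defensive idiom, sketched with a hypothetical structure and handler (names and layout are illustrative only):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct demo_args64 {
	u64	buffer;
	u32	len;
	/* implicit 4-byte tail padding would otherwise stay uninitialised */
};

static long demo_compat_ioctl(struct demo_args64 __user *uarg)
{
	struct demo_args64 arg64;
	unsigned long err;

	memset(&arg64, 0, sizeof(arg64));	/* scrub holes before the partial fills */

	err  = copy_from_user(&arg64.buffer, &uarg->buffer, sizeof(arg64.buffer));
	err |= copy_from_user(&arg64.len, &uarg->len, sizeof(arg64.len));
	if (err)
		return -EFAULT;

	/* ... perform the operation, then return the fully initialised struct ... */
	if (copy_to_user(uarg, &arg64, sizeof(arg64)))
		return -EFAULT;
	return 0;
}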
*/ }; diff -urNp linux-2.6.39.1/drivers/block/cpqarray.c linux-2.6.39.1/drivers/block/cpqarray.c --- linux-2.6.39.1/drivers/block/cpqarray.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/block/cpqarray.c 2011-05-22 19:36:31.000000000 -0400 @@ -80,7 +80,7 @@ static int eisa[8]; * product = Marketing Name for the board * access = Address of the struct of function pointers */ -static struct board_type products[] = { +static const struct board_type products[] = { { 0x0040110E, "IDA", &smart1_access }, { 0x0140110E, "IDA-2", &smart1_access }, { 0x1040110E, "IAES", &smart1_access }, @@ -911,6 +911,8 @@ static void do_ida_request(struct reques struct scatterlist tmp_sg[SG_MAX]; int i, dir, seg; + pax_track_stack(); + queue_next: creq = blk_peek_request(q); if (!creq) diff -urNp linux-2.6.39.1/drivers/block/cpqarray.h linux-2.6.39.1/drivers/block/cpqarray.h --- linux-2.6.39.1/drivers/block/cpqarray.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/block/cpqarray.h 2011-05-22 19:36:31.000000000 -0400 @@ -69,7 +69,7 @@ struct access_method { struct board_type { __u32 board_id; char *product_name; - struct access_method *access; + const struct access_method *access; }; struct ctlr_info { diff -urNp linux-2.6.39.1/drivers/block/DAC960.c linux-2.6.39.1/drivers/block/DAC960.c --- linux-2.6.39.1/drivers/block/DAC960.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/block/DAC960.c 2011-05-22 19:36:31.000000000 -0400 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur unsigned long flags; int Channel, TargetID; + pax_track_stack(); + if (!init_dma_loaf(Controller->PCIDevice, &local_dma, DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) + sizeof(DAC960_SCSI_Inquiry_T) + diff -urNp linux-2.6.39.1/drivers/block/drbd/drbd_int.h linux-2.6.39.1/drivers/block/drbd/drbd_int.h --- linux-2.6.39.1/drivers/block/drbd/drbd_int.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/block/drbd/drbd_int.h 2011-05-22 19:36:31.000000000 -0400 @@ -736,7 +736,7 @@ struct drbd_request; struct drbd_epoch { struct list_head list; unsigned int barrier_nr; - atomic_t epoch_size; /* increased on every request added. */ + atomic_unchecked_t epoch_size; /* increased on every request added. */ atomic_t active; /* increased on every req. added, and dec on every finished. 
*/ unsigned long flags; }; @@ -1108,7 +1108,7 @@ struct drbd_conf { void *int_dig_in; void *int_dig_vv; wait_queue_head_t seq_wait; - atomic_t packet_seq; + atomic_unchecked_t packet_seq; unsigned int peer_seq; spinlock_t peer_seq_lock; unsigned int minor; diff -urNp linux-2.6.39.1/drivers/block/drbd/drbd_main.c linux-2.6.39.1/drivers/block/drbd/drbd_main.c --- linux-2.6.39.1/drivers/block/drbd/drbd_main.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/block/drbd/drbd_main.c 2011-05-22 19:36:31.000000000 -0400 @@ -2387,7 +2387,7 @@ static int _drbd_send_ack(struct drbd_co p.sector = sector; p.block_id = block_id; p.blksize = blksize; - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq)); + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq)); if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED) return false; @@ -2686,7 +2686,7 @@ int drbd_send_dblock(struct drbd_conf *m p.sector = cpu_to_be64(req->sector); p.block_id = (unsigned long)req; p.seq_num = cpu_to_be32(req->seq_num = - atomic_add_return(1, &mdev->packet_seq)); + atomic_add_return_unchecked(1, &mdev->packet_seq)); dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw); @@ -2971,7 +2971,7 @@ void drbd_init_set_defaults(struct drbd_ atomic_set(&mdev->unacked_cnt, 0); atomic_set(&mdev->local_cnt, 0); atomic_set(&mdev->net_cnt, 0); - atomic_set(&mdev->packet_seq, 0); + atomic_set_unchecked(&mdev->packet_seq, 0); atomic_set(&mdev->pp_in_use, 0); atomic_set(&mdev->pp_in_use_by_net, 0); atomic_set(&mdev->rs_sect_in, 0); @@ -3051,8 +3051,8 @@ void drbd_mdev_cleanup(struct drbd_conf mdev->receiver.t_state); /* no need to lock it, I'm the only thread alive */ - if (atomic_read(&mdev->current_epoch->epoch_size) != 0) - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size)); + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0) + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size)); mdev->al_writ_cnt = mdev->bm_writ_cnt = mdev->read_cnt = diff -urNp linux-2.6.39.1/drivers/block/drbd/drbd_nl.c linux-2.6.39.1/drivers/block/drbd/drbd_nl.c --- linux-2.6.39.1/drivers/block/drbd/drbd_nl.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/block/drbd/drbd_nl.c 2011-05-22 19:36:31.000000000 -0400 @@ -2298,7 +2298,7 @@ static void drbd_connector_callback(stru module_put(THIS_MODULE); } -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */ +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */ static unsigned short * __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, @@ -2369,7 +2369,7 @@ void drbd_bcast_state(struct drbd_conf * cn_reply->id.idx = CN_IDX_DRBD; cn_reply->id.val = CN_VAL_DRBD; - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq); cn_reply->ack = 0; /* not used here. */ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + (int)((char *)tl - (char *)reply->tag_list); @@ -2401,7 +2401,7 @@ void drbd_bcast_ev_helper(struct drbd_co cn_reply->id.idx = CN_IDX_DRBD; cn_reply->id.val = CN_VAL_DRBD; - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq); cn_reply->ack = 0; /* not used here. 
*/ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + (int)((char *)tl - (char *)reply->tag_list); @@ -2479,7 +2479,7 @@ void drbd_bcast_ee(struct drbd_conf *mde cn_reply->id.idx = CN_IDX_DRBD; cn_reply->id.val = CN_VAL_DRBD; - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq); + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq); cn_reply->ack = 0; // not used here. cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + (int)((char*)tl - (char*)reply->tag_list); @@ -2518,7 +2518,7 @@ void drbd_bcast_sync_progress(struct drb cn_reply->id.idx = CN_IDX_DRBD; cn_reply->id.val = CN_VAL_DRBD; - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq); + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq); cn_reply->ack = 0; /* not used here. */ cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + (int)((char *)tl - (char *)reply->tag_list); diff -urNp linux-2.6.39.1/drivers/block/drbd/drbd_receiver.c linux-2.6.39.1/drivers/block/drbd/drbd_receiver.c --- linux-2.6.39.1/drivers/block/drbd/drbd_receiver.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/block/drbd/drbd_receiver.c 2011-05-22 19:36:31.000000000 -0400 @@ -894,7 +894,7 @@ retry: sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10; sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; - atomic_set(&mdev->packet_seq, 0); + atomic_set_unchecked(&mdev->packet_seq, 0); mdev->peer_seq = 0; drbd_thread_start(&mdev->asender); @@ -990,7 +990,7 @@ static enum finish_epoch drbd_may_finish do { next_epoch = NULL; - epoch_size = atomic_read(&epoch->epoch_size); + epoch_size = atomic_read_unchecked(&epoch->epoch_size); switch (ev & ~EV_CLEANUP) { case EV_PUT: @@ -1025,7 +1025,7 @@ static enum finish_epoch drbd_may_finish rv = FE_DESTROYED; } else { epoch->flags = 0; - atomic_set(&epoch->epoch_size, 0); + atomic_set_unchecked(&epoch->epoch_size, 0); /* atomic_set(&epoch->active, 0); is already zero */ if (rv == FE_STILL_LIVE) rv = FE_RECYCLED; @@ -1196,14 +1196,14 @@ static int receive_Barrier(struct drbd_c drbd_wait_ee_list_empty(mdev, &mdev->active_ee); drbd_flush(mdev); - if (atomic_read(&mdev->current_epoch->epoch_size)) { + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) { epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); if (epoch) break; } epoch = mdev->current_epoch; - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0); + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0); D_ASSERT(atomic_read(&epoch->active) == 0); D_ASSERT(epoch->flags == 0); @@ -1215,11 +1215,11 @@ static int receive_Barrier(struct drbd_c } epoch->flags = 0; - atomic_set(&epoch->epoch_size, 0); + atomic_set_unchecked(&epoch->epoch_size, 0); atomic_set(&epoch->active, 0); spin_lock(&mdev->epoch_lock); - if (atomic_read(&mdev->current_epoch->epoch_size)) { + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) { list_add(&epoch->list, &mdev->current_epoch->list); mdev->current_epoch = epoch; mdev->epochs++; @@ -1668,7 +1668,7 @@ static int receive_Data(struct drbd_conf spin_unlock(&mdev->peer_seq_lock); drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size); - atomic_inc(&mdev->current_epoch->epoch_size); + atomic_inc_unchecked(&mdev->current_epoch->epoch_size); return drbd_drain_block(mdev, data_size); } @@ -1694,7 +1694,7 @@ static int receive_Data(struct drbd_conf spin_lock(&mdev->epoch_lock); e->epoch = mdev->current_epoch; - atomic_inc(&e->epoch->epoch_size); + atomic_inc_unchecked(&e->epoch->epoch_size); atomic_inc(&e->epoch->active); spin_unlock(&mdev->epoch_lock); @@ -3905,7 +3905,7 @@ static void 
drbd_disconnect(struct drbd_ D_ASSERT(list_empty(&mdev->done_ee)); /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ - atomic_set(&mdev->current_epoch->epoch_size, 0); + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0); D_ASSERT(list_empty(&mdev->current_epoch->list)); } diff -urNp linux-2.6.39.1/drivers/block/nbd.c linux-2.6.39.1/drivers/block/nbd.c --- linux-2.6.39.1/drivers/block/nbd.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/block/nbd.c 2011-05-22 19:36:31.000000000 -0400 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device * struct kvec iov; sigset_t blocked, oldset; + pax_track_stack(); + if (unlikely(!sock)) { printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n", lo->disk->disk_name, (send ? "send" : "recv")); @@ -571,6 +573,8 @@ static void do_nbd_request(struct reques static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo, unsigned int cmd, unsigned long arg) { + pax_track_stack(); + switch (cmd) { case NBD_DISCONNECT: { struct request sreq; diff -urNp linux-2.6.39.1/drivers/block/smart1,2.h linux-2.6.39.1/drivers/block/smart1,2.h --- linux-2.6.39.1/drivers/block/smart1,2.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/block/smart1,2.h 2011-05-22 19:36:31.000000000 -0400 @@ -107,7 +107,7 @@ static unsigned long smart4_intr_pending return 0 ; } -static struct access_method smart4_access = { +static const struct access_method smart4_access = { smart4_submit_command, smart4_intr_mask, smart4_fifo_full, @@ -143,7 +143,7 @@ static unsigned long smart2_intr_pending return readl(h->vaddr + INTR_PENDING); } -static struct access_method smart2_access = { +static const struct access_method smart2_access = { smart2_submit_command, smart2_intr_mask, smart2_fifo_full, @@ -179,7 +179,7 @@ static unsigned long smart2e_intr_pendin return inl(h->io_mem_addr + INTR_PENDING); } -static struct access_method smart2e_access = { +static const struct access_method smart2e_access = { smart2e_submit_command, smart2e_intr_mask, smart2e_fifo_full, @@ -269,7 +269,7 @@ static unsigned long smart1_intr_pending return chan; } -static struct access_method smart1_access = { +static const struct access_method smart1_access = { smart1_submit_command, smart1_intr_mask, smart1_fifo_full, diff -urNp linux-2.6.39.1/drivers/block/xsysace.c linux-2.6.39.1/drivers/block/xsysace.c --- linux-2.6.39.1/drivers/block/xsysace.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/block/xsysace.c 2011-05-22 19:36:31.000000000 -0400 @@ -262,7 +262,7 @@ static void ace_dataout_8(struct ace_dev ace->data_ptr = src; } -static struct ace_reg_ops ace_reg_8_ops = { +static const struct ace_reg_ops ace_reg_8_ops = { .in = ace_in_8, .out = ace_out_8, .datain = ace_datain_8, @@ -327,14 +327,14 @@ static void ace_dataout_le16(struct ace_ ace->data_ptr = src; } -static struct ace_reg_ops ace_reg_be16_ops = { +static const struct ace_reg_ops ace_reg_be16_ops = { .in = ace_in_be16, .out = ace_out_be16, .datain = ace_datain_be16, .dataout = ace_dataout_be16, }; -static struct ace_reg_ops ace_reg_le16_ops = { +static const struct ace_reg_ops ace_reg_le16_ops = { .in = ace_in_le16, .out = ace_out_le16, .datain = ace_datain_le16, diff -urNp linux-2.6.39.1/drivers/char/agp/frontend.c linux-2.6.39.1/drivers/char/agp/frontend.c --- linux-2.6.39.1/drivers/char/agp/frontend.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/agp/frontend.c 2011-05-22 19:36:31.000000000 -0400 @@ -817,7 +817,7 @@ static int 
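/*
 * The nbd.c hunks above insert pax_track_stack() at the top of
 * functions with large stack frames (sock_xmit, __nbd_ioctl).  This is
 * part of PaX's kernel stack sanitisation: recording how deep the
 * stack grows so the used region can be cleared before returning to
 * userland instead of leaking stale data.  The sketch below is only a
 * userspace analogue; track_stack() and lowest_sp are invented for
 * illustration and assume a downward-growing stack.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uintptr_t lowest_sp;            /* deepest stack address seen so far */

static void track_stack(void)          /* stands in for pax_track_stack() */
{
    char probe;
    if ((uintptr_t)&probe < lowest_sp)
        lowest_sp = (uintptr_t)&probe;
}

static void big_frame(void)
{
    char buf[4096];                    /* large on-stack buffer (illustrative) */
    track_stack();
    memset(buf, 0, sizeof(buf));
    printf("big frame ran, buf[0]=%d\n", buf[0]);
}

int main(void)
{
    char top;
    lowest_sp = (uintptr_t)&top;
    big_frame();
    printf("stack grew by at least %lu bytes\n",
           (unsigned long)((uintptr_t)&top - lowest_sp));
    return 0;
}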
agpioc_reserve_wrap(struct ag if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) return -EFAULT; - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment)) + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv)) return -EFAULT; client = agp_find_client_by_pid(reserve.pid); diff -urNp linux-2.6.39.1/drivers/char/briq_panel.c linux-2.6.39.1/drivers/char/briq_panel.c --- linux-2.6.39.1/drivers/char/briq_panel.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/briq_panel.c 2011-05-22 19:41:32.000000000 -0400 @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -34,6 +35,7 @@ static int vfd_is_open; static unsigned char vfd[40]; static int vfd_cursor; static unsigned char ledpb, led; +static DEFINE_MUTEX(vfd_mutex); static void update_vfd(void) { @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f if (!vfd_is_open) return -EBUSY; + mutex_lock(&vfd_mutex); for (;;) { char c; if (!indx) break; - if (get_user(c, buf)) + if (get_user(c, buf)) { + mutex_unlock(&vfd_mutex); return -EFAULT; + } if (esc) { set_led(c); esc = 0; @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f buf++; } update_vfd(); + mutex_unlock(&vfd_mutex); return len; } diff -urNp linux-2.6.39.1/drivers/char/genrtc.c linux-2.6.39.1/drivers/char/genrtc.c --- linux-2.6.39.1/drivers/char/genrtc.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/genrtc.c 2011-05-22 19:41:32.000000000 -0400 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi switch (cmd) { case RTC_PLL_GET: + memset(&pll, 0, sizeof(pll)); if (get_rtc_pll(&pll)) return -EINVAL; else diff -urNp linux-2.6.39.1/drivers/char/hpet.c linux-2.6.39.1/drivers/char/hpet.c --- linux-2.6.39.1/drivers/char/hpet.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/hpet.c 2011-05-22 19:36:31.000000000 -0400 @@ -553,7 +553,7 @@ static inline unsigned long hpet_time_di } static int -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, struct hpet_info *info) { struct hpet_timer __iomem *timer; diff -urNp linux-2.6.39.1/drivers/char/ipmi/ipmi_devintf.c linux-2.6.39.1/drivers/char/ipmi/ipmi_devintf.c --- linux-2.6.39.1/drivers/char/ipmi/ipmi_devintf.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/ipmi/ipmi_devintf.c 2011-05-22 19:36:31.000000000 -0400 @@ -109,8 +109,7 @@ static int ipmi_fasync(int fd, struct fi return (result); } -static struct ipmi_user_hndl ipmi_hndlrs = -{ +static const struct ipmi_user_hndl ipmi_hndlrs = { .ipmi_recv_hndl = file_receive_handler, }; diff -urNp linux-2.6.39.1/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.39.1/drivers/char/ipmi/ipmi_msghandler.c --- linux-2.6.39.1/drivers/char/ipmi/ipmi_msghandler.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/ipmi/ipmi_msghandler.c 2011-05-22 19:36:31.000000000 -0400 @@ -82,7 +82,7 @@ struct ipmi_user { struct kref refcount; /* The upper layer that handles receive messages. */ - struct ipmi_user_hndl *handler; + const struct ipmi_user_hndl *handler; void *handler_data; /* The interface this user is bound to. 
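/*
 * Two small hardening fixes above: briq_panel_write() gains a mutex so
 * concurrent writers cannot interleave updates to the shared vfd[]
 * buffer and cursor (note the unlock added on the get_user() error
 * path), and gen_rtc_ioctl() zeroes the on-stack pll structure before
 * filling it, so no uninitialised stack bytes can reach userspace.
 * Below is a userspace sketch of the locking pattern; pthreads stand
 * in for the kernel mutex API and all names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vfd_mutex = PTHREAD_MUTEX_INITIALIZER;
static char vfd[40];
static unsigned int vfd_cursor;

static int vfd_write(const char *buf, unsigned int len)
{
    pthread_mutex_lock(&vfd_mutex);
    for (unsigned int i = 0; i < len; i++) {
        if (vfd_cursor >= sizeof(vfd)) {
            pthread_mutex_unlock(&vfd_mutex);  /* drop the lock on every early return */
            return -1;
        }
        vfd[vfd_cursor++] = buf[i];
    }
    pthread_mutex_unlock(&vfd_mutex);
    return 0;
}

int main(void)
{
    vfd_write("hello", 5);
    printf("cursor=%u, display starts with %.5s\n", vfd_cursor, vfd);
    return 0;
}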
*/ @@ -414,7 +414,7 @@ struct ipmi_smi { struct proc_dir_entry *proc_dir; char proc_dir_name[10]; - atomic_t stats[IPMI_NUM_STATS]; + atomic_unchecked_t stats[IPMI_NUM_STATS]; /* * run_to_completion duplicate of smb_info, smi_info @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex); #define ipmi_inc_stat(intf, stat) \ - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]) #define ipmi_get_stat(intf, stat) \ - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])) static int is_lan_addr(struct ipmi_addr *addr) { @@ -875,7 +875,7 @@ static int intf_err_seq(ipmi_smi_t int int ipmi_create_user(unsigned int if_num, - struct ipmi_user_hndl *handler, + const struct ipmi_user_hndl *handler, void *handler_data, ipmi_user_t *user) { @@ -2844,7 +2844,7 @@ int ipmi_register_smi(struct ipmi_smi_ha INIT_LIST_HEAD(&intf->cmd_rcvrs); init_waitqueue_head(&intf->waitq); for (i = 0; i < IPMI_NUM_STATS; i++) - atomic_set(&intf->stats[i], 0); + atomic_set_unchecked(&intf->stats[i], 0); intf->proc_dir = NULL; @@ -4196,6 +4196,8 @@ static void send_panic_events(char *str) struct ipmi_smi_msg smi_msg; struct ipmi_recv_msg recv_msg; + pax_track_stack(); + si = (struct ipmi_system_interface_addr *) &addr; si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; si->channel = IPMI_BMC_CHANNEL; diff -urNp linux-2.6.39.1/drivers/char/ipmi/ipmi_poweroff.c linux-2.6.39.1/drivers/char/ipmi/ipmi_poweroff.c --- linux-2.6.39.1/drivers/char/ipmi/ipmi_poweroff.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/ipmi/ipmi_poweroff.c 2011-05-22 19:36:31.000000000 -0400 @@ -133,7 +133,7 @@ static void receive_handler(struct ipmi_ complete(comp); } -static struct ipmi_user_hndl ipmi_poweroff_handler = { +static const struct ipmi_user_hndl ipmi_poweroff_handler = { .ipmi_recv_hndl = receive_handler }; diff -urNp linux-2.6.39.1/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.39.1/drivers/char/ipmi/ipmi_si_intf.c --- linux-2.6.39.1/drivers/char/ipmi/ipmi_si_intf.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/ipmi/ipmi_si_intf.c 2011-05-22 19:36:31.000000000 -0400 @@ -276,7 +276,7 @@ struct smi_info { unsigned char slave_addr; /* Counters and things for the proc filesystem. 
*/ - atomic_t stats[SI_NUM_STATS]; + atomic_unchecked_t stats[SI_NUM_STATS]; struct task_struct *thread; @@ -285,9 +285,9 @@ struct smi_info { }; #define smi_inc_stat(smi, stat) \ - atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat]) #define smi_get_stat(smi, stat) \ - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat])) #define SI_MAX_PARMS 4 @@ -3198,7 +3198,7 @@ static int try_smi_init(struct smi_info atomic_set(&new_smi->req_events, 0); new_smi->run_to_completion = 0; for (i = 0; i < SI_NUM_STATS; i++) - atomic_set(&new_smi->stats[i], 0); + atomic_set_unchecked(&new_smi->stats[i], 0); new_smi->interrupt_disabled = 1; atomic_set(&new_smi->stop_operation, 0); diff -urNp linux-2.6.39.1/drivers/char/ipmi/ipmi_watchdog.c linux-2.6.39.1/drivers/char/ipmi/ipmi_watchdog.c --- linux-2.6.39.1/drivers/char/ipmi/ipmi_watchdog.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/ipmi/ipmi_watchdog.c 2011-05-22 19:36:31.000000000 -0400 @@ -216,7 +216,7 @@ static int set_param_timeout(const char return rv; } -static struct kernel_param_ops param_ops_timeout = { +static const struct kernel_param_ops param_ops_timeout = { .set = set_param_timeout, .get = param_get_int, }; @@ -278,14 +278,14 @@ static int set_param_wdog_ifnum(const ch return 0; } -static struct kernel_param_ops param_ops_wdog_ifnum = { +static const struct kernel_param_ops param_ops_wdog_ifnum = { .set = set_param_wdog_ifnum, .get = param_get_int, }; #define param_check_wdog_ifnum param_check_int -static struct kernel_param_ops param_ops_str = { +static const struct kernel_param_ops param_ops_str = { .set = set_param_str, .get = get_param_str, }; @@ -953,7 +953,7 @@ static void ipmi_wdog_pretimeout_handler pretimeout_since_last_heartbeat = 1; } -static struct ipmi_user_hndl ipmi_hndlrs = { +static const struct ipmi_user_hndl ipmi_hndlrs = { .ipmi_recv_hndl = ipmi_wdog_msg_handler, .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler }; diff -urNp linux-2.6.39.1/drivers/char/Kconfig linux-2.6.39.1/drivers/char/Kconfig --- linux-2.6.39.1/drivers/char/Kconfig 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/Kconfig 2011-05-22 19:41:37.000000000 -0400 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig" config DEVKMEM bool "/dev/kmem virtual device support" - default y + default n + depends on !GRKERNSEC_KMEM help Say Y here if you want to support the /dev/kmem device. 
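/*
 * A recurring pattern in this section: operation tables made up of
 * function pointers (access_method, ace_reg_ops, ipmi_user_hndl,
 * kernel_param_ops above, and many more below) are declared const,
 * along with the pointers that reference them.  A const table can be
 * placed in read-only memory, so a kernel write primitive cannot
 * simply overwrite one of its function pointers to hijack control
 * flow.  Minimal sketch of the idea; the struct and functions are
 * invented for the example.
 */
#include <stdio.h>

struct demo_ops {
    void (*submit)(int cmd);
    int  (*pending)(void);
};

static void demo_submit(int cmd) { printf("submit %d\n", cmd); }
static int  demo_pending(void)   { return 0; }

/* const: the initialised table lands in .rodata instead of writable data */
static const struct demo_ops demo = {
    .submit  = demo_submit,
    .pending = demo_pending,
};

int main(void)
{
    demo.submit(1);
    /* demo.submit = NULL;  -- rejected at compile time: object is read-only */
    printf("pending=%d\n", demo.pending());
    return 0;
}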
The /dev/kmem device is rarely used, but can be used for certain @@ -596,6 +597,7 @@ config DEVPORT bool depends on !M68K depends on ISA || PCI + depends on !GRKERNSEC_KMEM default y source "drivers/s390/char/Kconfig" diff -urNp linux-2.6.39.1/drivers/char/mem.c linux-2.6.39.1/drivers/char/mem.c --- linux-2.6.39.1/drivers/char/mem.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/mem.c 2011-05-22 19:41:37.000000000 -0400 @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -34,6 +35,10 @@ # include #endif +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) +extern struct file_operations grsec_fops; +#endif + static inline unsigned long size_inside_page(unsigned long start, unsigned long size) { @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig while (cursor < to) { if (!devmem_is_allowed(pfn)) { +#ifdef CONFIG_GRKERNSEC_KMEM + gr_handle_mem_readwrite(from, to); +#else printk(KERN_INFO "Program %s tried to access /dev/mem between %Lx->%Lx.\n", current->comm, from, to); +#endif return 0; } cursor += PAGE_SIZE; @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig } return 1; } +#elif defined(CONFIG_GRKERNSEC_KMEM) +static inline int range_is_allowed(unsigned long pfn, unsigned long size) +{ + return 0; +} #else static inline int range_is_allowed(unsigned long pfn, unsigned long size) { @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil while (count > 0) { unsigned long remaining; + char *temp; sz = size_inside_page(p, count); @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil if (!ptr) return -EFAULT; - remaining = copy_to_user(buf, ptr, sz); +#ifdef CONFIG_PAX_USERCOPY + temp = kmalloc(sz, GFP_KERNEL); + if (!temp) { + unxlate_dev_mem_ptr(p, ptr); + return -ENOMEM; + } + memcpy(temp, ptr, sz); +#else + temp = ptr; +#endif + + remaining = copy_to_user(buf, temp, sz); + +#ifdef CONFIG_PAX_USERCOPY + kfree(temp); +#endif + unxlate_dev_mem_ptr(p, ptr); if (remaining) return -EFAULT; @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi size_t count, loff_t *ppos) { unsigned long p = *ppos; - ssize_t low_count, read, sz; + ssize_t low_count, read, sz, err = 0; char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ - int err = 0; read = 0; if (p < (unsigned long) high_memory) { @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi } #endif while (low_count > 0) { + char *temp; + sz = size_inside_page(p, low_count); /* @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi */ kbuf = xlate_dev_kmem_ptr((char *)p); - if (copy_to_user(buf, kbuf, sz)) +#ifdef CONFIG_PAX_USERCOPY + temp = kmalloc(sz, GFP_KERNEL); + if (!temp) + return -ENOMEM; + memcpy(temp, kbuf, sz); +#else + temp = kbuf; +#endif + + err = copy_to_user(buf, temp, sz); + +#ifdef CONFIG_PAX_USERCOPY + kfree(temp); +#endif + + if (err) return -EFAULT; buf += sz; p += sz; @@ -854,6 +901,9 @@ static const struct memdev { #ifdef CONFIG_CRASH_DUMP [12] = { "oldmem", 0, &oldmem_fops, NULL }, #endif +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL }, +#endif }; static int memory_open(struct inode *inode, struct file *filp) diff -urNp linux-2.6.39.1/drivers/char/mmtimer.c linux-2.6.39.1/drivers/char/mmtimer.c --- linux-2.6.39.1/drivers/char/mmtimer.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/mmtimer.c 2011-05-22 19:36:31.000000000 -0400 @@ -53,7 +53,7 @@ MODULE_LICENSE("GPL"); #define RTC_BITS 55 /* 55 bits 
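/*
 * read_mem()/read_kmem() above are changed, under CONFIG_PAX_USERCOPY,
 * to bounce data through a freshly kmalloc'd buffer of exactly sz
 * bytes before copy_to_user().  PAX_USERCOPY validates copies against
 * the bounds of the kernel object they come from; a heap object sized
 * to the copy always passes that check, whereas a raw pointer into
 * /dev/mem has no object bounds to validate, which is presumably why
 * the bounce buffer is introduced.  Userspace sketch of the pattern
 * (names invented, error handling simplified):
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stand-in for copy_to_user(): here just a bounded memcpy */
static int copy_out(char *dst, const char *src, size_t len)
{
    memcpy(dst, src, len);
    return 0;
}

static int bounce_read(char *user_buf, const char *raw_ptr, size_t sz)
{
    char *temp = malloc(sz);           /* analogue of kmalloc(sz, GFP_KERNEL) */
    int ret;

    if (!temp)
        return -1;
    memcpy(temp, raw_ptr, sz);         /* stage the data in a well-bounded object */
    ret = copy_out(user_buf, temp, sz);
    free(temp);
    return ret;
}

int main(void)
{
    const char src[] = "device memory contents";
    char dst[sizeof(src)];

    if (bounce_read(dst, src, sizeof(src)) == 0)
        printf("read: %s\n", dst);
    return 0;
}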
for this implementation */ -static struct k_clock sgi_clock; +static const struct k_clock sgi_clock; extern unsigned long sn_rtc_cycles_per_second; @@ -772,7 +772,7 @@ static int sgi_clock_getres(const clocki return 0; } -static struct k_clock sgi_clock = { +static const struct k_clock sgi_clock = { .clock_set = sgi_clock_set, .clock_get = sgi_clock_get, .clock_getres = sgi_clock_getres, diff -urNp linux-2.6.39.1/drivers/char/nvram.c linux-2.6.39.1/drivers/char/nvram.c --- linux-2.6.39.1/drivers/char/nvram.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/nvram.c 2011-05-22 19:36:31.000000000 -0400 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f spin_unlock_irq(&rtc_lock); - if (copy_to_user(buf, contents, tmp - contents)) + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents)) return -EFAULT; *ppos = i; diff -urNp linux-2.6.39.1/drivers/char/random.c linux-2.6.39.1/drivers/char/random.c --- linux-2.6.39.1/drivers/char/random.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/random.c 2011-05-22 19:41:37.000000000 -0400 @@ -261,8 +261,13 @@ /* * Configuration information */ +#ifdef CONFIG_GRKERNSEC_RANDNET +#define INPUT_POOL_WORDS 512 +#define OUTPUT_POOL_WORDS 128 +#else #define INPUT_POOL_WORDS 128 #define OUTPUT_POOL_WORDS 32 +#endif #define SEC_XFER_SIZE 512 #define EXTRACT_SIZE 10 @@ -300,10 +305,17 @@ static struct poolinfo { int poolwords; int tap1, tap2, tap3, tap4, tap5; } poolinfo_table[] = { +#ifdef CONFIG_GRKERNSEC_RANDNET + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */ + { 512, 411, 308, 208, 104, 1 }, + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */ + { 128, 103, 76, 51, 25, 1 }, +#else /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */ { 128, 103, 76, 51, 25, 1 }, /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */ { 32, 26, 20, 14, 7, 1 }, +#endif #if 0 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */ { 2048, 1638, 1231, 819, 411, 1 }, @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru extract_buf(r, tmp); i = min_t(int, nbytes, EXTRACT_SIZE); - if (copy_to_user(buf, tmp, i)) { + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) { ret = -EFAULT; break; } @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid); #include static int min_read_thresh = 8, min_write_thresh; -static int max_read_thresh = INPUT_POOL_WORDS * 32; +static int max_read_thresh = OUTPUT_POOL_WORDS * 32; static int max_write_thresh = INPUT_POOL_WORDS * 32; static char sysctl_bootid[16]; diff -urNp linux-2.6.39.1/drivers/char/sonypi.c linux-2.6.39.1/drivers/char/sonypi.c --- linux-2.6.39.1/drivers/char/sonypi.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/sonypi.c 2011-05-22 19:36:31.000000000 -0400 @@ -55,6 +55,7 @@ #include #include #include +#include #include @@ -491,7 +492,7 @@ static struct sonypi_device { spinlock_t fifo_lock; wait_queue_head_t fifo_proc_list; struct fasync_struct *fifo_async; - int open_count; + local_t open_count; int model; struct input_dev *input_jog_dev; struct input_dev *input_key_dev; @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st static int sonypi_misc_release(struct inode *inode, struct file *file) { mutex_lock(&sonypi_device.lock); - sonypi_device.open_count--; + local_dec(&sonypi_device.open_count); mutex_unlock(&sonypi_device.lock); return 0; } @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode { mutex_lock(&sonypi_device.lock); /* Flush input queue on first open */ - if 
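/*
 * nvram_read() and extract_entropy_user() above gain defensive checks
 * that the length handed to copy_to_user() can never exceed the
 * on-stack source buffer (contents[] and tmp[] respectively), turning
 * a potential out-of-bounds kernel read into a plain -EFAULT.  The
 * random.c hunks also enlarge the entropy pools when GRKERNSEC_RANDNET
 * is enabled.  Sketch of the clamp-before-copy pattern with invented
 * names:
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int copy_out(char *dst, const char *src, size_t len)
{
    memcpy(dst, src, len);             /* stand-in for copy_to_user() */
    return 0;
}

static long nvram_style_read(char *user_buf, size_t requested)
{
    char contents[128];
    size_t produced;

    memset(contents, 'x', sizeof(contents));
    produced = requested;              /* imagine this came from device parsing */

    /* never trust the computed length more than the buffer it refers to */
    if (produced > sizeof(contents) || copy_out(user_buf, contents, produced))
        return -EFAULT;
    return (long)produced;
}

int main(void)
{
    char buf[256];

    printf("read 64   -> %ld\n", nvram_style_read(buf, 64));
    printf("read 4096 -> %ld\n", nvram_style_read(buf, 4096));
    return 0;
}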
(!sonypi_device.open_count) + if (!local_read(&sonypi_device.open_count)) kfifo_reset(&sonypi_device.fifo); - sonypi_device.open_count++; + local_inc(&sonypi_device.open_count); mutex_unlock(&sonypi_device.lock); return 0; diff -urNp linux-2.6.39.1/drivers/char/tpm/tpm_bios.c linux-2.6.39.1/drivers/char/tpm/tpm_bios.c --- linux-2.6.39.1/drivers/char/tpm/tpm_bios.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/tpm/tpm_bios.c 2011-05-22 19:36:31.000000000 -0400 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start event = addr; if ((event->event_type == 0 && event->event_size == 0) || - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit)) + (event->event_size >= limit - addr - sizeof(struct tcpa_event))) return NULL; return addr; @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next( return NULL; if ((event->event_type == 0 && event->event_size == 0) || - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit)) + (event->event_size >= limit - v - sizeof(struct tcpa_event))) return NULL; (*pos)++; @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_ int i; for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++) - seq_putc(m, data[i]); + if (!seq_putc(m, data[i])) + return -EFAULT; return 0; } @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log log->bios_event_log_end = log->bios_event_log + len; virt = acpi_os_map_memory(start, len); + if (!virt) { + kfree(log->bios_event_log); + log->bios_event_log = NULL; + return -EFAULT; + } memcpy(log->bios_event_log, virt, len); diff -urNp linux-2.6.39.1/drivers/char/tpm/tpm.c linux-2.6.39.1/drivers/char/tpm/tpm.c --- linux-2.6.39.1/drivers/char/tpm/tpm.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/tpm/tpm.c 2011-05-22 19:36:31.000000000 -0400 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c chip->vendor.req_complete_val) goto out_recv; - if ((status == chip->vendor.req_canceled)) { + if (status == chip->vendor.req_canceled) { dev_err(chip->dev, "Operation Canceled\n"); rc = -ECANCELED; goto out; @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de struct tpm_chip *chip = dev_get_drvdata(dev); + pax_track_stack(); + tpm_cmd.header.in = tpm_readpubek_header; err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, "attempting to read the PUBEK"); diff -urNp linux-2.6.39.1/drivers/char/ttyprintk.c linux-2.6.39.1/drivers/char/ttyprintk.c --- linux-2.6.39.1/drivers/char/ttyprintk.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/ttyprintk.c 2011-05-22 19:36:31.000000000 -0400 @@ -170,7 +170,7 @@ static const struct tty_operations ttypr .ioctl = tpk_ioctl, }; -struct tty_port_operations null_ops = { }; +const struct tty_port_operations null_ops = { }; static struct tty_driver *ttyprintk_driver; diff -urNp linux-2.6.39.1/drivers/char/xilinx_hwicap/xilinx_hwicap.c linux-2.6.39.1/drivers/char/xilinx_hwicap/xilinx_hwicap.c --- linux-2.6.39.1/drivers/char/xilinx_hwicap/xilinx_hwicap.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/char/xilinx_hwicap/xilinx_hwicap.c 2011-05-22 19:36:31.000000000 -0400 @@ -678,14 +678,14 @@ static int __devinit hwicap_setup(struct return retval; } -static struct hwicap_driver_config buffer_icap_config = { +static const struct hwicap_driver_config buffer_icap_config = { .get_configuration = buffer_icap_get_configuration, .set_configuration = buffer_icap_set_configuration, .get_status = buffer_icap_get_status, .reset = buffer_icap_reset, }; -static struct 
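/*
 * The tpm_bios.c hunks above rearrange a bounds check: instead of
 * computing addr + sizeof(struct tcpa_event) + event->event_size and
 * comparing it against the end of the log (an addition that can wrap
 * when event_size is huge), the attacker-influenced size is compared
 * directly against the space that remains.  Self-contained
 * demonstration of the difference, using offsets instead of pointers:
 */
#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE 32u

/* broken: the addition can wrap past zero and pass the check */
static int fits_overflowing(uint32_t off, uint32_t ev_size, uint32_t limit)
{
    return off + HDR_SIZE + ev_size < limit;
}

/* safe: compare the untrusted size against the remaining space */
static int fits_safe(uint32_t off, uint32_t ev_size, uint32_t limit)
{
    return ev_size < limit - off - HDR_SIZE;
}

int main(void)
{
    uint32_t off = 100, limit = 4096;
    uint32_t huge = UINT32_MAX - 64;   /* crafted event_size */

    printf("overflowing check: %s\n", fits_overflowing(off, huge, limit) ? "accepted" : "rejected");
    printf("safe check:        %s\n", fits_safe(off, huge, limit) ? "accepted" : "rejected");
    return 0;
}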
hwicap_driver_config fifo_icap_config = { +static const struct hwicap_driver_config fifo_icap_config = { .get_configuration = fifo_icap_get_configuration, .set_configuration = fifo_icap_set_configuration, .get_status = fifo_icap_get_status, diff -urNp linux-2.6.39.1/drivers/crypto/hifn_795x.c linux-2.6.39.1/drivers/crypto/hifn_795x.c --- linux-2.6.39.1/drivers/crypto/hifn_795x.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/crypto/hifn_795x.c 2011-05-22 19:36:31.000000000 -0400 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device 0xCA, 0x34, 0x2B, 0x2E}; struct scatterlist sg; + pax_track_stack(); + memset(src, 0, sizeof(src)); memset(ctx.key, 0, sizeof(ctx.key)); diff -urNp linux-2.6.39.1/drivers/crypto/padlock-aes.c linux-2.6.39.1/drivers/crypto/padlock-aes.c --- linux-2.6.39.1/drivers/crypto/padlock-aes.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/crypto/padlock-aes.c 2011-05-22 19:36:31.000000000 -0400 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm struct crypto_aes_ctx gen_aes; int cpu; + pax_track_stack(); + if (key_len % 8) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; diff -urNp linux-2.6.39.1/drivers/dca/dca-core.c linux-2.6.39.1/drivers/dca/dca-core.c --- linux-2.6.39.1/drivers/dca/dca-core.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/dca/dca-core.c 2011-05-22 19:36:31.000000000 -0400 @@ -325,7 +325,7 @@ EXPORT_SYMBOL_GPL(dca_get_tag); * @ops - pointer to struct of dca operation function pointers * @priv_size - size of extra mem to be added for provider's needs */ -struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size) +struct dca_provider *alloc_dca_provider(const struct dca_ops *ops, int priv_size) { struct dca_provider *dca; int alloc_size; diff -urNp linux-2.6.39.1/drivers/dma/ioat/dca.c linux-2.6.39.1/drivers/dma/ioat/dca.c --- linux-2.6.39.1/drivers/dma/ioat/dca.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/dma/ioat/dca.c 2011-05-22 19:36:31.000000000 -0400 @@ -234,7 +234,7 @@ static int ioat_dca_dev_managed(struct d return 0; } -static struct dca_ops ioat_dca_ops = { +static const struct dca_ops ioat_dca_ops = { .add_requester = ioat_dca_add_requester, .remove_requester = ioat_dca_remove_requester, .get_tag = ioat_dca_get_tag, @@ -384,7 +384,7 @@ static u8 ioat2_dca_get_tag(struct dca_p return tag; } -static struct dca_ops ioat2_dca_ops = { +static const struct dca_ops ioat2_dca_ops = { .add_requester = ioat2_dca_add_requester, .remove_requester = ioat2_dca_remove_requester, .get_tag = ioat2_dca_get_tag, @@ -579,7 +579,7 @@ static u8 ioat3_dca_get_tag(struct dca_p return tag; } -static struct dca_ops ioat3_dca_ops = { +static const struct dca_ops ioat3_dca_ops = { .add_requester = ioat3_dca_add_requester, .remove_requester = ioat3_dca_remove_requester, .get_tag = ioat3_dca_get_tag, diff -urNp linux-2.6.39.1/drivers/edac/amd64_edac.h linux-2.6.39.1/drivers/edac/amd64_edac.h --- linux-2.6.39.1/drivers/edac/amd64_edac.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/edac/amd64_edac.h 2011-05-22 19:36:31.000000000 -0400 @@ -333,7 +333,7 @@ struct chip_select { }; struct amd64_pvt { - struct low_ops *ops; + const struct low_ops *ops; /* pci_device handles which we utilize */ struct pci_dev *F1, *F2, *F3; @@ -443,7 +443,7 @@ struct low_ops { struct amd64_family_type { const char *ctl_name; u16 f1_id, f3_id; - struct low_ops ops; + const struct low_ops ops; }; int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, diff -urNp 
linux-2.6.39.1/drivers/edac/edac_mc_sysfs.c linux-2.6.39.1/drivers/edac/edac_mc_sysfs.c --- linux-2.6.39.1/drivers/edac/edac_mc_sysfs.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/edac/edac_mc_sysfs.c 2011-05-22 19:36:31.000000000 -0400 @@ -760,7 +760,7 @@ static void edac_inst_grp_release(struct } /* Intermediate show/store table */ -static struct sysfs_ops inst_grp_ops = { +static const struct sysfs_ops inst_grp_ops = { .show = inst_grp_show, .store = inst_grp_store }; diff -urNp linux-2.6.39.1/drivers/edac/edac_pci_sysfs.c linux-2.6.39.1/drivers/edac/edac_pci_sysfs.c --- linux-2.6.39.1/drivers/edac/edac_pci_sysfs.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/edac/edac_pci_sysfs.c 2011-05-22 19:36:31.000000000 -0400 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */ static int edac_pci_poll_msec = 1000; /* one second workq period */ -static atomic_t pci_parity_count = ATOMIC_INIT(0); -static atomic_t pci_nonparity_count = ATOMIC_INIT(0); +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0); +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0); static struct kobject *edac_pci_top_main_kobj; static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0); @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str edac_printk(KERN_CRIT, EDAC_PCI, "Signaled System Error on %s\n", pci_name(dev)); - atomic_inc(&pci_nonparity_count); + atomic_inc_unchecked(&pci_nonparity_count); } if (status & (PCI_STATUS_PARITY)) { @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str "Master Data Parity Error on %s\n", pci_name(dev)); - atomic_inc(&pci_parity_count); + atomic_inc_unchecked(&pci_parity_count); } if (status & (PCI_STATUS_DETECTED_PARITY)) { @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str "Detected Parity Error on %s\n", pci_name(dev)); - atomic_inc(&pci_parity_count); + atomic_inc_unchecked(&pci_parity_count); } } @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " "Signaled System Error on %s\n", pci_name(dev)); - atomic_inc(&pci_nonparity_count); + atomic_inc_unchecked(&pci_nonparity_count); } if (status & (PCI_STATUS_PARITY)) { @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str "Master Data Parity Error on " "%s\n", pci_name(dev)); - atomic_inc(&pci_parity_count); + atomic_inc_unchecked(&pci_parity_count); } if (status & (PCI_STATUS_DETECTED_PARITY)) { @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str "Detected Parity Error on %s\n", pci_name(dev)); - atomic_inc(&pci_parity_count); + atomic_inc_unchecked(&pci_parity_count); } } } @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void) if (!check_pci_errors) return; - before_count = atomic_read(&pci_parity_count); + before_count = atomic_read_unchecked(&pci_parity_count); /* scan all PCI devices looking for a Parity Error on devices and * bridges. 
@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void) /* Only if operator has selected panic on PCI Error */ if (edac_pci_get_panic_on_pe()) { /* If the count is different 'after' from 'before' */ - if (before_count != atomic_read(&pci_parity_count)) + if (before_count != atomic_read_unchecked(&pci_parity_count)) panic("EDAC: PCI Parity Error"); } } diff -urNp linux-2.6.39.1/drivers/firewire/core-cdev.c linux-2.6.39.1/drivers/firewire/core-cdev.c --- linux-2.6.39.1/drivers/firewire/core-cdev.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/firewire/core-cdev.c 2011-05-22 19:36:31.000000000 -0400 @@ -1312,8 +1312,7 @@ static int init_iso_resource(struct clie int ret; if ((request->channels == 0 && request->bandwidth == 0) || - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL || - request->bandwidth < 0) + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL) return -EINVAL; r = kmalloc(sizeof(*r), GFP_KERNEL); diff -urNp linux-2.6.39.1/drivers/firewire/core-transaction.c linux-2.6.39.1/drivers/firewire/core-transaction.c --- linux-2.6.39.1/drivers/firewire/core-transaction.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/firewire/core-transaction.c 2011-05-22 19:36:31.000000000 -0400 @@ -36,6 +36,7 @@ #include #include #include +#include #include @@ -420,6 +421,8 @@ int fw_run_transaction(struct fw_card *c struct transaction_callback_data d; struct fw_transaction t; + pax_track_stack(); + init_timer_on_stack(&t.split_timeout_timer); init_completion(&d.done); d.payload = payload; diff -urNp linux-2.6.39.1/drivers/firmware/dmi_scan.c linux-2.6.39.1/drivers/firmware/dmi_scan.c --- linux-2.6.39.1/drivers/firmware/dmi_scan.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/firmware/dmi_scan.c 2011-05-22 19:36:31.000000000 -0400 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void) } } else { - /* - * no iounmap() for that ioremap(); it would be a no-op, but - * it's so early in setup that sucker gets confused into doing - * what it shouldn't if we actually call it. 
- */ p = dmi_ioremap(0xF0000, 0x10000); if (p == NULL) goto error; diff -urNp linux-2.6.39.1/drivers/gpio/vr41xx_giu.c linux-2.6.39.1/drivers/gpio/vr41xx_giu.c --- linux-2.6.39.1/drivers/gpio/vr41xx_giu.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpio/vr41xx_giu.c 2011-05-22 19:36:31.000000000 -0400 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq) printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n", maskl, pendl, maskh, pendh); - atomic_inc(&irq_err_count); + atomic_inc_unchecked(&irq_err_count); return -EINVAL; } diff -urNp linux-2.6.39.1/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.39.1/drivers/gpu/drm/drm_crtc_helper.c --- linux-2.6.39.1/drivers/gpu/drm/drm_crtc_helper.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/drm_crtc_helper.c 2011-05-22 19:36:31.000000000 -0400 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d struct drm_crtc *tmp; int crtc_mask = 1; - WARN(!crtc, "checking null crtc?\n"); + BUG_ON(!crtc); dev = crtc->dev; @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm struct drm_encoder *encoder; bool ret = true; + pax_track_stack(); + crtc->enabled = drm_helper_crtc_in_use(crtc); if (!crtc->enabled) return true; diff -urNp linux-2.6.39.1/drivers/gpu/drm/drm_drv.c linux-2.6.39.1/drivers/gpu/drm/drm_drv.c --- linux-2.6.39.1/drivers/gpu/drm/drm_drv.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/drm_drv.c 2011-05-22 19:36:31.000000000 -0400 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp, dev = file_priv->minor->dev; atomic_inc(&dev->ioctl_count); - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]); ++file_priv->ioctl_count; DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", diff -urNp linux-2.6.39.1/drivers/gpu/drm/drm_fops.c linux-2.6.39.1/drivers/gpu/drm/drm_fops.c --- linux-2.6.39.1/drivers/gpu/drm/drm_fops.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/drm_fops.c 2011-05-22 19:36:31.000000000 -0400 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device * } for (i = 0; i < ARRAY_SIZE(dev->counts); i++) - atomic_set(&dev->counts[i], 0); + atomic_set_unchecked(&dev->counts[i], 0); dev->sigdata.lock = NULL; @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct retcode = drm_open_helper(inode, filp, dev); if (!retcode) { - atomic_inc(&dev->counts[_DRM_STAT_OPENS]); - if (!dev->open_count++) + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]); + if (local_inc_return(&dev->open_count) == 1) retcode = drm_setup(dev); } if (!retcode) { @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str mutex_lock(&drm_global_mutex); - DRM_DEBUG("open_count = %d\n", dev->open_count); + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count)); if (dev->driver->preclose) dev->driver->preclose(dev, file_priv); @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", task_pid_nr(current), (long)old_encode_dev(file_priv->minor->device), - dev->open_count); + local_read(&dev->open_count)); /* if the master has gone away we can't do anything with the lock */ if (file_priv->minor->master) @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str * End inline drm_release */ - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); - if (!--dev->open_count) { + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]); + if (local_dec_and_test(&dev->open_count)) { if (atomic_read(&dev->ioctl_count)) { DRM_ERROR("Device busy: 
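/*
 * drm_open()/drm_release() above stop doing bare ++/-- on the shared
 * int dev->open_count and use a local_t with local_inc_return(),
 * local_dec_and_test() and local_read() instead, so the "first open
 * does setup, last close does teardown" logic is driven by atomic
 * updates; the i915/nouveau/radeon can_switch checks later in the
 * patch read the same counter with local_read().  C11 atomics play the
 * same role in this userspace sketch; names are invented.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;

static void device_open(void)
{
    if (atomic_fetch_add(&open_count, 1) + 1 == 1)   /* analogue of local_inc_return() == 1 */
        printf("first opener: running one-time setup\n");
}

static void device_release(void)
{
    if (atomic_fetch_sub(&open_count, 1) - 1 == 0)   /* analogue of local_dec_and_test() */
        printf("last closer: tearing down\n");
}

int main(void)
{
    device_open();
    device_open();
    device_release();
    device_release();
    printf("open_count=%d\n", atomic_load(&open_count));
    return 0;
}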
%d\n", atomic_read(&dev->ioctl_count)); diff -urNp linux-2.6.39.1/drivers/gpu/drm/drm_global.c linux-2.6.39.1/drivers/gpu/drm/drm_global.c --- linux-2.6.39.1/drivers/gpu/drm/drm_global.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/drm_global.c 2011-05-22 19:36:31.000000000 -0400 @@ -36,7 +36,7 @@ struct drm_global_item { struct mutex mutex; void *object; - int refcount; + atomic_t refcount; }; static struct drm_global_item glob[DRM_GLOBAL_NUM]; @@ -49,7 +49,7 @@ void drm_global_init(void) struct drm_global_item *item = &glob[i]; mutex_init(&item->mutex); item->object = NULL; - item->refcount = 0; + atomic_set(&item->refcount, 0); } } @@ -59,7 +59,7 @@ void drm_global_release(void) for (i = 0; i < DRM_GLOBAL_NUM; ++i) { struct drm_global_item *item = &glob[i]; BUG_ON(item->object != NULL); - BUG_ON(item->refcount != 0); + BUG_ON(atomic_read(&item->refcount) != 0); } } @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa void *object; mutex_lock(&item->mutex); - if (item->refcount == 0) { + if (atomic_read(&item->refcount) == 0) { item->object = kzalloc(ref->size, GFP_KERNEL); if (unlikely(item->object == NULL)) { ret = -ENOMEM; @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa goto out_err; } - ++item->refcount; + atomic_inc(&item->refcount); ref->object = item->object; object = item->object; mutex_unlock(&item->mutex); @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl struct drm_global_item *item = &glob[ref->global_type]; mutex_lock(&item->mutex); - BUG_ON(item->refcount == 0); + BUG_ON(atomic_read(&item->refcount) == 0); BUG_ON(ref->object != item->object); - if (--item->refcount == 0) { + if (atomic_dec_and_test(&item->refcount)) { ref->release(ref); item->object = NULL; } diff -urNp linux-2.6.39.1/drivers/gpu/drm/drm_info.c linux-2.6.39.1/drivers/gpu/drm/drm_info.c --- linux-2.6.39.1/drivers/gpu/drm/drm_info.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/drm_info.c 2011-05-22 19:41:37.000000000 -0400 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void struct drm_local_map *map; struct drm_map_list *r_list; - /* Hardcoded from _DRM_FRAME_BUFFER, - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */ - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" }; + static const char * const types[] = { + [_DRM_FRAME_BUFFER] = "FB", + [_DRM_REGISTERS] = "REG", + [_DRM_SHM] = "SHM", + [_DRM_AGP] = "AGP", + [_DRM_SCATTER_GATHER] = "SG", + [_DRM_CONSISTENT] = "PCI", + [_DRM_GEM] = "GEM" }; const char *type; int i; @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void map = r_list->map; if (!map) continue; - if (map->type < 0 || map->type > 5) + if (map->type >= ARRAY_SIZE(types)) type = "??"; else type = types[map->type]; @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi vma->vm_flags & VM_MAYSHARE ? 's' : 'p', vma->vm_flags & VM_LOCKED ? 'l' : '-', vma->vm_flags & VM_IO ? 'i' : '-', +#ifdef CONFIG_GRKERNSEC_HIDESYM + 0); +#else vma->vm_pgoff); +#endif #if defined(__i386__) pgprot = pgprot_val(vma->vm_page_prot); diff -urNp linux-2.6.39.1/drivers/gpu/drm/drm_ioctl.c linux-2.6.39.1/drivers/gpu/drm/drm_ioctl.c --- linux-2.6.39.1/drivers/gpu/drm/drm_ioctl.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/drm_ioctl.c 2011-05-22 19:36:31.000000000 -0400 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev, stats->data[i].value = (file_priv->master->lock.hw_lock ? 
file_priv->master->lock.hw_lock->lock : 0); else - stats->data[i].value = atomic_read(&dev->counts[i]); + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]); stats->data[i].type = dev->types[i]; } diff -urNp linux-2.6.39.1/drivers/gpu/drm/drm_lock.c linux-2.6.39.1/drivers/gpu/drm/drm_lock.c --- linux-2.6.39.1/drivers/gpu/drm/drm_lock.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/drm_lock.c 2011-05-22 19:36:31.000000000 -0400 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi if (drm_lock_take(&master->lock, lock->context)) { master->lock.file_priv = file_priv; master->lock.lock_time = jiffies; - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]); break; /* Got lock */ } @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v return -EINVAL; } - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]); if (drm_lock_free(&master->lock, lock->context)) { /* FIXME: Should really bail out here. */ diff -urNp linux-2.6.39.1/drivers/gpu/drm/i810/i810_dma.c linux-2.6.39.1/drivers/gpu/drm/i810/i810_dma.c --- linux-2.6.39.1/drivers/gpu/drm/i810/i810_dma.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i810/i810_dma.c 2011-05-22 19:36:31.000000000 -0400 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de dma->buflist[vertex->idx], vertex->discard, vertex->used); - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); - atomic_inc(&dev->counts[_DRM_STAT_DMA]); + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]); sarea_priv->last_enqueue = dev_priv->counter - 1; sarea_priv->last_dispatch = (int)hw_status[5]; @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, mc->last_render); - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); - atomic_inc(&dev->counts[_DRM_STAT_DMA]); + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]); sarea_priv->last_enqueue = dev_priv->counter - 1; sarea_priv->last_dispatch = (int)hw_status[5]; diff -urNp linux-2.6.39.1/drivers/gpu/drm/i810/i810_drv.h linux-2.6.39.1/drivers/gpu/drm/i810/i810_drv.h --- linux-2.6.39.1/drivers/gpu/drm/i810/i810_drv.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i810/i810_drv.h 2011-05-22 19:36:31.000000000 -0400 @@ -108,8 +108,8 @@ typedef struct drm_i810_private { int page_flipping; wait_queue_head_t irq_queue; - atomic_t irq_received; - atomic_t irq_emitted; + atomic_unchecked_t irq_received; + atomic_unchecked_t irq_emitted; int front_offset; } drm_i810_private_t; diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.39.1/drivers/gpu/drm/i915/dvo_ch7017.c --- linux-2.6.39.1/drivers/gpu/drm/i915/dvo_ch7017.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/dvo_ch7017.c 2011-05-22 19:36:31.000000000 -0400 @@ -390,7 +390,7 @@ static void ch7017_destroy(struct intel_ } } -struct intel_dvo_dev_ops ch7017_ops = { +const struct intel_dvo_dev_ops ch7017_ops = { .init = ch7017_init, .detect = ch7017_detect, .mode_valid = ch7017_mode_valid, diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.39.1/drivers/gpu/drm/i915/dvo_ch7xxx.c --- linux-2.6.39.1/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/dvo_ch7xxx.c 
2011-05-22 19:36:31.000000000 -0400 @@ -320,7 +320,7 @@ static void ch7xxx_destroy(struct intel_ } } -struct intel_dvo_dev_ops ch7xxx_ops = { +const struct intel_dvo_dev_ops ch7xxx_ops = { .init = ch7xxx_init, .detect = ch7xxx_detect, .mode_valid = ch7xxx_mode_valid, diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/dvo.h linux-2.6.39.1/drivers/gpu/drm/i915/dvo.h --- linux-2.6.39.1/drivers/gpu/drm/i915/dvo.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/dvo.h 2011-05-22 19:36:31.000000000 -0400 @@ -122,23 +122,23 @@ struct intel_dvo_dev_ops { * * \return singly-linked list of modes or NULL if no modes found. */ - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo); + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo); /** * Clean up driver-specific bits of the output */ - void (*destroy) (struct intel_dvo_device *dvo); + void (* const destroy) (struct intel_dvo_device *dvo); /** * Debugging hook to dump device registers to log file */ - void (*dump_regs)(struct intel_dvo_device *dvo); + void (* const dump_regs)(struct intel_dvo_device *dvo); }; -extern struct intel_dvo_dev_ops sil164_ops; -extern struct intel_dvo_dev_ops ch7xxx_ops; -extern struct intel_dvo_dev_ops ivch_ops; -extern struct intel_dvo_dev_ops tfp410_ops; -extern struct intel_dvo_dev_ops ch7017_ops; +extern const struct intel_dvo_dev_ops sil164_ops; +extern const struct intel_dvo_dev_ops ch7xxx_ops; +extern const struct intel_dvo_dev_ops ivch_ops; +extern const struct intel_dvo_dev_ops tfp410_ops; +extern const struct intel_dvo_dev_ops ch7017_ops; #endif /* _INTEL_DVO_H */ diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.39.1/drivers/gpu/drm/i915/dvo_ivch.c --- linux-2.6.39.1/drivers/gpu/drm/i915/dvo_ivch.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/dvo_ivch.c 2011-05-22 19:36:31.000000000 -0400 @@ -410,7 +410,7 @@ static void ivch_destroy(struct intel_dv } } -struct intel_dvo_dev_ops ivch_ops= { +const struct intel_dvo_dev_ops ivch_ops= { .init = ivch_init, .dpms = ivch_dpms, .mode_valid = ivch_mode_valid, diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.39.1/drivers/gpu/drm/i915/dvo_sil164.c --- linux-2.6.39.1/drivers/gpu/drm/i915/dvo_sil164.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/dvo_sil164.c 2011-05-22 19:36:31.000000000 -0400 @@ -252,7 +252,7 @@ static void sil164_destroy(struct intel_ } } -struct intel_dvo_dev_ops sil164_ops = { +const struct intel_dvo_dev_ops sil164_ops = { .init = sil164_init, .detect = sil164_detect, .mode_valid = sil164_mode_valid, diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.39.1/drivers/gpu/drm/i915/dvo_tfp410.c --- linux-2.6.39.1/drivers/gpu/drm/i915/dvo_tfp410.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/dvo_tfp410.c 2011-05-22 19:36:31.000000000 -0400 @@ -293,7 +293,7 @@ static void tfp410_destroy(struct intel_ } } -struct intel_dvo_dev_ops tfp410_ops = { +const struct intel_dvo_dev_ops tfp410_ops = { .init = tfp410_init, .detect = tfp410_detect, .mode_valid = tfp410_mode_valid, diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.39.1/drivers/gpu/drm/i915/i915_debugfs.c --- linux-2.6.39.1/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-22 19:36:31.000000000 -0400 @@ -496,7 +496,7 @@ static int i915_interrupt_info(struct se I915_READ(GTIMR)); } seq_printf(m, 
"Interrupts received: %d\n", - atomic_read(&dev_priv->irq_received)); + atomic_read_unchecked(&dev_priv->irq_received)); for (i = 0; i < I915_NUM_RINGS; i++) { if (IS_GEN6(dev)) { seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/i915_dma.c linux-2.6.39.1/drivers/gpu/drm/i915/i915_dma.c --- linux-2.6.39.1/drivers/gpu/drm/i915/i915_dma.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/i915_dma.c 2011-05-22 19:36:31.000000000 -0400 @@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s bool can_switch; spin_lock(&dev->count_lock); - can_switch = (dev->open_count == 0); + can_switch = (local_read(&dev->open_count) == 0); spin_unlock(&dev->count_lock); return can_switch; } diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/i915_drv.c linux-2.6.39.1/drivers/gpu/drm/i915/i915_drv.c --- linux-2.6.39.1/drivers/gpu/drm/i915/i915_drv.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/i915_drv.c 2011-05-22 19:36:31.000000000 -0400 @@ -679,7 +679,7 @@ static const struct dev_pm_ops i915_pm_o .restore = i915_pm_resume, }; -static struct vm_operations_struct i915_gem_vm_ops = { +static const struct vm_operations_struct i915_gem_vm_ops = { .fault = i915_gem_fault, .open = drm_gem_vm_open, .close = drm_gem_vm_close, diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/i915_drv.h linux-2.6.39.1/drivers/gpu/drm/i915/i915_drv.h --- linux-2.6.39.1/drivers/gpu/drm/i915/i915_drv.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/i915_drv.h 2011-05-22 19:36:31.000000000 -0400 @@ -287,7 +287,7 @@ typedef struct drm_i915_private { int current_page; int page_flipping; - atomic_t irq_received; + atomic_unchecked_t irq_received; /* protects the irq masks */ spinlock_t irq_lock; @@ -848,7 +848,7 @@ struct drm_i915_gem_object { * will be page flipped away on the next vblank. When it * reaches 0, dev_priv->pending_flip_queue will be woken up. 
*/ - atomic_t pending_flip; + atomic_unchecked_t pending_flip; }; #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) @@ -1232,7 +1232,7 @@ extern int intel_setup_gmbus(struct drm_ extern void intel_teardown_gmbus(struct drm_device *dev); extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) { return container_of(adapter, struct intel_gmbus, adapter)->force_bit; } diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-2.6.39.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c --- linux-2.6.39.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-05-22 19:36:31.000000000 -0400 @@ -192,7 +192,7 @@ i915_gem_object_set_to_gpu_domain(struct i915_gem_release_mmap(obj); if (obj->base.pending_write_domain) - cd->flips |= atomic_read(&obj->pending_flip); + cd->flips |= atomic_read_unchecked(&obj->pending_flip); /* The actual obj->write_domain will be updated with * pending_write_domain after we emit the accumulated flush for all diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/i915_irq.c linux-2.6.39.1/drivers/gpu/drm/i915/i915_irq.c --- linux-2.6.39.1/drivers/gpu/drm/i915/i915_irq.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/i915_irq.c 2011-05-22 19:36:31.000000000 -0400 @@ -1101,7 +1101,7 @@ irqreturn_t i915_driver_irq_handler(DRM_ int ret = IRQ_NONE, pipe; bool blc_event = false; - atomic_inc(&dev_priv->irq_received); + atomic_inc_unchecked(&dev_priv->irq_received); if (HAS_PCH_SPLIT(dev)) return ironlake_irq_handler(dev); @@ -1655,7 +1655,7 @@ void i915_driver_irq_preinstall(struct d drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int pipe; - atomic_set(&dev_priv->irq_received, 0); + atomic_set_unchecked(&dev_priv->irq_received, 0); INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->error_work, i915_error_work_func); diff -urNp linux-2.6.39.1/drivers/gpu/drm/i915/intel_display.c linux-2.6.39.1/drivers/gpu/drm/i915/intel_display.c --- linux-2.6.39.1/drivers/gpu/drm/i915/intel_display.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/i915/intel_display.c 2011-05-22 19:36:31.000000000 -0400 @@ -2244,7 +2244,7 @@ intel_pipe_set_base(struct drm_crtc *crt wait_event(dev_priv->pending_flip_queue, atomic_read(&dev_priv->mm.wedged) || - atomic_read(&obj->pending_flip) == 0); + atomic_read_unchecked(&obj->pending_flip) == 0); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the @@ -2712,7 +2712,7 @@ static void intel_crtc_wait_for_pending_ obj = to_intel_framebuffer(crtc->fb)->obj; dev_priv = crtc->dev->dev_private; wait_event(dev_priv->pending_flip_queue, - atomic_read(&obj->pending_flip) == 0); + atomic_read_unchecked(&obj->pending_flip) == 0); } static bool intel_crtc_driving_pch(struct drm_crtc *crtc) @@ -6016,7 +6016,7 @@ static void do_intel_finish_page_flip(st atomic_clear_mask(1 << intel_crtc->plane, &obj->pending_flip.counter); - if (atomic_read(&obj->pending_flip) == 0) + if (atomic_read_unchecked(&obj->pending_flip) == 0) wake_up(&dev_priv->pending_flip_queue); schedule_work(&work->work); @@ -6145,7 +6145,7 @@ static int intel_crtc_page_flip(struct d /* Block 
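/*
 * One non-PaX fix above: intel_gmbus_is_forced_bit() is changed from
 * "extern inline" to "static inline".  In the kernel's gnu89 dialect,
 * "extern inline" tells the compiler it may inline the body but must
 * never emit an out-of-line copy, so any call the compiler declines to
 * inline becomes an unresolved symbol; "static inline" always leaves a
 * usable definition in the translation unit.  Tiny illustration of the
 * safe form (the struct and helper below are invented):
 */
#include <stdio.h>

struct adapter { int force_bit; };

/* static inline: callable whether or not the compiler chooses to inline it */
static inline int is_forced_bit(const struct adapter *a)
{
    return a->force_bit != 0;
}

int main(void)
{
    struct adapter a = { .force_bit = 1 };
    printf("forced bit: %d\n", is_forced_bit(&a));
    return 0;
}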
clients from rendering to the new back buffer until * the flip occurs and the object is no longer visible. */ - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); switch (INTEL_INFO(dev)->gen) { case 2: diff -urNp linux-2.6.39.1/drivers/gpu/drm/mga/mga_drv.h linux-2.6.39.1/drivers/gpu/drm/mga/mga_drv.h --- linux-2.6.39.1/drivers/gpu/drm/mga/mga_drv.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/mga/mga_drv.h 2011-05-22 19:36:31.000000000 -0400 @@ -120,9 +120,9 @@ typedef struct drm_mga_private { u32 clear_cmd; u32 maccess; - atomic_t vbl_received; /**< Number of vblanks received. */ + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */ wait_queue_head_t fence_queue; - atomic_t last_fence_retired; + atomic_unchecked_t last_fence_retired; u32 next_fence_to_post; unsigned int fb_cpp; diff -urNp linux-2.6.39.1/drivers/gpu/drm/mga/mga_irq.c linux-2.6.39.1/drivers/gpu/drm/mga/mga_irq.c --- linux-2.6.39.1/drivers/gpu/drm/mga/mga_irq.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/mga/mga_irq.c 2011-05-22 19:36:31.000000000 -0400 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de if (crtc != 0) return 0; - return atomic_read(&dev_priv->vbl_received); + return atomic_read_unchecked(&dev_priv->vbl_received); } @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I /* VBLANK interrupt */ if (status & MGA_VLINEPEN) { MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); - atomic_inc(&dev_priv->vbl_received); + atomic_inc_unchecked(&dev_priv->vbl_received); drm_handle_vblank(dev, 0); handled = 1; } @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I if ((prim_start & ~0x03) != (prim_end & ~0x03)) MGA_WRITE(MGA_PRIMEND, prim_end); - atomic_inc(&dev_priv->last_fence_retired); + atomic_inc_unchecked(&dev_priv->last_fence_retired); DRM_WAKEUP(&dev_priv->fence_queue); handled = 1; } @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev * using fences. 
*/ DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ, - (((cur_fence = atomic_read(&dev_priv->last_fence_retired)) + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired)) - *sequence) <= (1 << 23))); *sequence = cur_fence; diff -urNp linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_acpi.c linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_acpi.c --- linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_acpi.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_acpi.c 2011-05-22 19:36:31.000000000 -0400 @@ -141,7 +141,7 @@ static int nouveau_dsm_get_client_id(str return VGA_SWITCHEROO_DIS; } -static struct vga_switcheroo_handler nouveau_dsm_handler = { +static const struct vga_switcheroo_handler nouveau_dsm_handler = { .switchto = nouveau_dsm_switchto, .power_state = nouveau_dsm_power_state, .init = nouveau_dsm_init, diff -urNp linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_drv.h linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_drv.h --- linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-06-07 18:07:24.000000000 -0400 @@ -228,7 +228,7 @@ struct nouveau_channel { struct list_head pending; uint32_t sequence; uint32_t sequence_ack; - atomic_t last_sequence_irq; + atomic_unchecked_t last_sequence_irq; } fence; /* DMA push buffer */ @@ -662,7 +662,7 @@ struct drm_nouveau_private { struct drm_global_reference mem_global_ref; struct ttm_bo_global_ref bo_global_ref; struct ttm_bo_device bdev; - atomic_t validate_sequence; + atomic_unchecked_t validate_sequence; } ttm; struct { diff -urNp linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_fence.c linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_fence.c --- linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-05-22 19:36:31.000000000 -0400 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan if (USE_REFCNT(dev)) sequence = nvchan_rd32(chan, 0x48); else - sequence = atomic_read(&chan->fence.last_sequence_irq); + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq); if (chan->fence.sequence_ack == sequence) goto out; @@ -553,7 +553,7 @@ nouveau_fence_channel_init(struct nouvea out_initialised: INIT_LIST_HEAD(&chan->fence.pending); spin_lock_init(&chan->fence.lock); - atomic_set(&chan->fence.last_sequence_irq, 0); + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0); return 0; } diff -urNp linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_gem.c linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_gem.c --- linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-05-22 19:36:31.000000000 -0400 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch int trycnt = 0; int ret, i; - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence); + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence); retry: if (++trycnt > 100000) { NV_ERROR(dev, "%s failed and gave up.\n", __func__); diff -urNp linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_state.c --- linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_state.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/nouveau/nouveau_state.c 2011-05-22 19:36:31.000000000 -0400 @@ -583,7 +583,7 @@ static bool nouveau_switcheroo_can_switc bool can_switch; spin_lock(&dev->count_lock); - 
can_switch = (dev->open_count == 0); + can_switch = (local_read(&dev->open_count) == 0); spin_unlock(&dev->count_lock); return can_switch; } diff -urNp linux-2.6.39.1/drivers/gpu/drm/nouveau/nv04_graph.c linux-2.6.39.1/drivers/gpu/drm/nouveau/nv04_graph.c --- linux-2.6.39.1/drivers/gpu/drm/nouveau/nv04_graph.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/nouveau/nv04_graph.c 2011-05-22 19:36:31.000000000 -0400 @@ -552,7 +552,7 @@ static int nv04_graph_mthd_set_ref(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) { - atomic_set(&chan->fence.last_sequence_irq, data); + atomic_set_unchecked(&chan->fence.last_sequence_irq, data); return 0; } diff -urNp linux-2.6.39.1/drivers/gpu/drm/r128/r128_cce.c linux-2.6.39.1/drivers/gpu/drm/r128/r128_cce.c --- linux-2.6.39.1/drivers/gpu/drm/r128/r128_cce.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/r128/r128_cce.c 2011-05-22 19:36:31.000000000 -0400 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d /* GH: Simple idle check. */ - atomic_set(&dev_priv->idle_count, 0); + atomic_set_unchecked(&dev_priv->idle_count, 0); /* We don't support anything other than bus-mastering ring mode, * but the ring can be in either AGP or PCI space for the ring diff -urNp linux-2.6.39.1/drivers/gpu/drm/r128/r128_drv.h linux-2.6.39.1/drivers/gpu/drm/r128/r128_drv.h --- linux-2.6.39.1/drivers/gpu/drm/r128/r128_drv.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/r128/r128_drv.h 2011-05-22 19:36:31.000000000 -0400 @@ -90,14 +90,14 @@ typedef struct drm_r128_private { int is_pci; unsigned long cce_buffers_offset; - atomic_t idle_count; + atomic_unchecked_t idle_count; int page_flipping; int current_page; u32 crtc_offset; u32 crtc_offset_cntl; - atomic_t vbl_received; + atomic_unchecked_t vbl_received; u32 color_fmt; unsigned int front_offset; diff -urNp linux-2.6.39.1/drivers/gpu/drm/r128/r128_irq.c linux-2.6.39.1/drivers/gpu/drm/r128/r128_irq.c --- linux-2.6.39.1/drivers/gpu/drm/r128/r128_irq.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/r128/r128_irq.c 2011-05-22 19:36:31.000000000 -0400 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d if (crtc != 0) return 0; - return atomic_read(&dev_priv->vbl_received); + return atomic_read_unchecked(&dev_priv->vbl_received); } irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_ /* VBLANK interrupt */ if (status & R128_CRTC_VBLANK_INT) { R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); - atomic_inc(&dev_priv->vbl_received); + atomic_inc_unchecked(&dev_priv->vbl_received); drm_handle_vblank(dev, 0); return IRQ_HANDLED; } diff -urNp linux-2.6.39.1/drivers/gpu/drm/r128/r128_state.c linux-2.6.39.1/drivers/gpu/drm/r128/r128_state.c --- linux-2.6.39.1/drivers/gpu/drm/r128/r128_state.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/r128/r128_state.c 2011-05-22 19:36:31.000000000 -0400 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv) { - if (atomic_read(&dev_priv->idle_count) == 0) + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); else - atomic_set(&dev_priv->idle_count, 0); + atomic_set_unchecked(&dev_priv->idle_count, 0); } #endif diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/atom.c linux-2.6.39.1/drivers/gpu/drm/radeon/atom.c --- linux-2.6.39.1/drivers/gpu/drm/radeon/atom.c 
2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/atom.c 2011-05-22 19:36:31.000000000 -0400 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c char name[512]; int i; + pax_track_stack(); + ctx->card = card; ctx->bios = bios; diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.39.1/drivers/gpu/drm/radeon/mkregtable.c --- linux-2.6.39.1/drivers/gpu/drm/radeon/mkregtable.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/mkregtable.c 2011-05-22 19:36:31.000000000 -0400 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, regex_t mask_rex; regmatch_t match[4]; char buf[1024]; - size_t end; + long end; int len; int done = 0; int r; unsigned o; struct offset *offset; char last_reg_s[10]; - int last_reg; + unsigned long last_reg; if (regcomp (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) { diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_atombios.c --- linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-22 19:36:31.000000000 -0400 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from struct radeon_gpio_rec gpio; struct radeon_hpd hpd; + pax_track_stack(); + if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) return false; diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_atpx_handler.c linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_atpx_handler.c --- linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_atpx_handler.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_atpx_handler.c 2011-05-22 19:36:31.000000000 -0400 @@ -234,7 +234,7 @@ static int radeon_atpx_get_client_id(str return VGA_SWITCHEROO_DIS; } -static struct vga_switcheroo_handler radeon_atpx_handler = { +static const struct vga_switcheroo_handler radeon_atpx_handler = { .switchto = radeon_atpx_switchto, .power_state = radeon_atpx_power_state, .init = radeon_atpx_init, diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_device.c linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_device.c --- linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_device.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_device.c 2011-05-22 19:36:31.000000000 -0400 @@ -674,7 +674,7 @@ static bool radeon_switcheroo_can_switch bool can_switch; spin_lock(&dev->count_lock); - can_switch = (dev->open_count == 0); + can_switch = (local_read(&dev->open_count) == 0); spin_unlock(&dev->count_lock); return can_switch; } diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_display.c --- linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_display.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_display.c 2011-05-22 19:36:31.000000000 -0400 @@ -934,6 +934,8 @@ void radeon_compute_pll_legacy(struct ra uint32_t post_div; u32 pll_out_min, pll_out_max; + pax_track_stack(); + DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); freq = freq * 1000; diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_drv.h --- linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-22 19:36:31.000000000 -0400 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private 
{ /* SW interrupt */ wait_queue_head_t swi_queue; - atomic_t swi_emitted; + atomic_unchecked_t swi_emitted; int vblank_crtc; uint32_t irq_enable_reg; uint32_t r500_disp_irq_reg; diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_fence.c --- linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-22 19:36:31.000000000 -0400 @@ -49,7 +49,7 @@ int radeon_fence_emit(struct radeon_devi write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); return 0; } - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq); + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq); if (!rdev->cp.ready) { /* FIXME: cp is not running assume everythings is done right * away @@ -352,7 +352,7 @@ int radeon_fence_driver_init(struct rade return r; } WREG32(rdev->fence_drv.scratch_reg, 0); - atomic_set(&rdev->fence_drv.seq, 0); + atomic_set_unchecked(&rdev->fence_drv.seq, 0); INIT_LIST_HEAD(&rdev->fence_drv.created); INIT_LIST_HEAD(&rdev->fence_drv.emited); INIT_LIST_HEAD(&rdev->fence_drv.signaled); diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/radeon.h linux-2.6.39.1/drivers/gpu/drm/radeon/radeon.h --- linux-2.6.39.1/drivers/gpu/drm/radeon/radeon.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/radeon.h 2011-05-22 19:36:31.000000000 -0400 @@ -189,7 +189,7 @@ extern int sumo_get_temp(struct radeon_d */ struct radeon_fence_driver { uint32_t scratch_reg; - atomic_t seq; + atomic_unchecked_t seq; uint32_t last_seq; unsigned long last_jiffies; unsigned long last_timeout; diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_ioc32.c --- linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-05-22 19:36:31.000000000 -0400 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str request = compat_alloc_user_space(sizeof(*request)); if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) || __put_user(req32.param, &request->param) - || __put_user((void __user *)(unsigned long)req32.value, + || __put_user((unsigned long)req32.value, &request->value)) return -EFAULT; diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_irq.c --- linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-22 19:36:31.000000000 -0400 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de unsigned int ret; RING_LOCALS; - atomic_inc(&dev_priv->swi_emitted); - ret = atomic_read(&dev_priv->swi_emitted); + atomic_inc_unchecked(&dev_priv->swi_emitted); + ret = atomic_read_unchecked(&dev_priv->swi_emitted); BEGIN_RING(4); OUT_RING_REG(RADEON_LAST_SWI_REG, ret); @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; - atomic_set(&dev_priv->swi_emitted, 0); + atomic_set_unchecked(&dev_priv->swi_emitted, 0); DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); dev->max_vblank_count = 0x001fffff; diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_state.c --- linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_state.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_state.c 2011-05-22 19:36:31.000000000 
-0400 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, sarea_priv->nbox * sizeof(depth_boxes[0]))) return -EFAULT; @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm { drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_getparam_t *param = data; - int value; + int value = 0; DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_ttm.c --- linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_ttm.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/radeon_ttm.c 2011-05-22 19:36:31.000000000 -0400 @@ -603,8 +603,9 @@ void radeon_ttm_set_active_vram_size(str man->size = size >> PAGE_SHIFT; } -static struct vm_operations_struct radeon_ttm_vm_ops; -static const struct vm_operations_struct *ttm_vm_ops = NULL; +extern int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf); +extern void ttm_bo_vm_open(struct vm_area_struct *vma); +extern void ttm_bo_vm_close(struct vm_area_struct *vma); static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { @@ -612,17 +613,22 @@ static int radeon_ttm_fault(struct vm_ar struct radeon_device *rdev; int r; - bo = (struct ttm_buffer_object *)vma->vm_private_data; - if (bo == NULL) { + bo = (struct ttm_buffer_object *)vma->vm_private_data; + if (!bo) return VM_FAULT_NOPAGE; - } rdev = radeon_get_rdev(bo->bdev); mutex_lock(&rdev->vram_mutex); - r = ttm_vm_ops->fault(vma, vmf); + r = ttm_bo_vm_fault(vma, vmf); mutex_unlock(&rdev->vram_mutex); return r; } +static const struct vm_operations_struct radeon_ttm_vm_ops = { + .fault = radeon_ttm_fault, + .open = ttm_bo_vm_open, + .close = ttm_bo_vm_close +}; + int radeon_mmap(struct file *filp, struct vm_area_struct *vma) { struct drm_file *file_priv; @@ -635,18 +641,11 @@ int radeon_mmap(struct file *filp, struc file_priv = filp->private_data; rdev = file_priv->minor->dev->dev_private; - if (rdev == NULL) { + if (!rdev) return -EINVAL; - } r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev); - if (unlikely(r != 0)) { + if (r) return r; - } - if (unlikely(ttm_vm_ops == NULL)) { - ttm_vm_ops = vma->vm_ops; - radeon_ttm_vm_ops = *ttm_vm_ops; - radeon_ttm_vm_ops.fault = &radeon_ttm_fault; - } vma->vm_ops = &radeon_ttm_vm_ops; return 0; } diff -urNp linux-2.6.39.1/drivers/gpu/drm/radeon/rs690.c linux-2.6.39.1/drivers/gpu/drm/radeon/rs690.c --- linux-2.6.39.1/drivers/gpu/drm/radeon/rs690.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/radeon/rs690.c 2011-05-22 19:36:31.000000000 -0400 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && rdev->pm.sideport_bandwidth.full) rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; - read_delay_latency.full = dfixed_const(370 * 800 * 1000); + read_delay_latency.full = dfixed_const(800 * 1000); read_delay_latency.full = dfixed_div(read_delay_latency, rdev->pm.igp_sideport_mclk); + a.full = dfixed_const(370); + read_delay_latency.full = dfixed_mul(read_delay_latency, a); } else { if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && rdev->pm.k8_bandwidth.full) diff -urNp linux-2.6.39.1/drivers/gpu/drm/ttm/ttm_bo_vm.c 
linux-2.6.39.1/drivers/gpu/drm/ttm/ttm_bo_vm.c --- linux-2.6.39.1/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-05-22 19:36:31.000000000 -0400 @@ -69,11 +69,11 @@ static struct ttm_buffer_object *ttm_bo_ return best_bo; } -static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct ttm_buffer_object *bo = (struct ttm_buffer_object *) vma->vm_private_data; - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_device *bdev; unsigned long page_offset; unsigned long page_last; unsigned long pfn; @@ -83,8 +83,12 @@ static int ttm_bo_vm_fault(struct vm_are int i; unsigned long address = (unsigned long)vmf->virtual_address; int retval = VM_FAULT_NOPAGE; - struct ttm_mem_type_manager *man = - &bdev->man[bo->mem.mem_type]; + struct ttm_mem_type_manager *man; + + if (!bo) + return VM_FAULT_NOPAGE; + bdev = bo->bdev; + man = &bdev->man[bo->mem.mem_type]; /* * Work around locking order reversal in fault / nopfn @@ -219,22 +223,25 @@ out_unlock: ttm_bo_unreserve(bo); return retval; } +EXPORT_SYMBOL(ttm_bo_vm_fault); -static void ttm_bo_vm_open(struct vm_area_struct *vma) +void ttm_bo_vm_open(struct vm_area_struct *vma) { struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; (void)ttm_bo_reference(bo); } +EXPORT_SYMBOL(ttm_bo_vm_open); -static void ttm_bo_vm_close(struct vm_area_struct *vma) +void ttm_bo_vm_close(struct vm_area_struct *vma) { struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; ttm_bo_unref(&bo); vma->vm_private_data = NULL; } +EXPORT_SYMBOL(ttm_bo_vm_close); static const struct vm_operations_struct ttm_bo_vm_ops = { .fault = ttm_bo_vm_fault, diff -urNp linux-2.6.39.1/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-2.6.39.1/drivers/gpu/drm/ttm/ttm_page_alloc.c --- linux-2.6.39.1/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-05-22 19:36:31.000000000 -0400 @@ -397,9 +397,9 @@ static int ttm_pool_get_num_unused_pages */ static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask) { - static atomic_t start_pool = ATOMIC_INIT(0); + static atomic_unchecked_t start_pool = ATOMIC_INIT(0); unsigned i; - unsigned pool_offset = atomic_add_return(1, &start_pool); + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool); struct ttm_page_pool *pool; pool_offset = pool_offset % NUM_POOLS; diff -urNp linux-2.6.39.1/drivers/gpu/drm/via/via_drv.h linux-2.6.39.1/drivers/gpu/drm/via/via_drv.h --- linux-2.6.39.1/drivers/gpu/drm/via/via_drv.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/via/via_drv.h 2011-05-22 19:36:31.000000000 -0400 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer { typedef uint32_t maskarray_t[5]; typedef struct drm_via_irq { - atomic_t irq_received; + atomic_unchecked_t irq_received; uint32_t pending_mask; uint32_t enable_mask; wait_queue_head_t irq_queue; @@ -75,7 +75,7 @@ typedef struct drm_via_private { struct timeval last_vblank; int last_vblank_valid; unsigned usec_per_vblank; - atomic_t vbl_received; + atomic_unchecked_t vbl_received; drm_via_state_t hc_state; char pci_buf[VIA_PCI_BUF_SIZE]; const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; diff -urNp linux-2.6.39.1/drivers/gpu/drm/via/via_irq.c linux-2.6.39.1/drivers/gpu/drm/via/via_irq.c --- linux-2.6.39.1/drivers/gpu/drm/via/via_irq.c 2011-05-19 
00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/via/via_irq.c 2011-05-22 19:36:31.000000000 -0400 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de if (crtc != 0) return 0; - return atomic_read(&dev_priv->vbl_received); + return atomic_read_unchecked(&dev_priv->vbl_received); } irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I status = VIA_READ(VIA_REG_INTERRUPT); if (status & VIA_IRQ_VBLANK_PENDING) { - atomic_inc(&dev_priv->vbl_received); - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { + atomic_inc_unchecked(&dev_priv->vbl_received); + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) { do_gettimeofday(&cur_vblank); if (dev_priv->last_vblank_valid) { dev_priv->usec_per_vblank = @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I dev_priv->last_vblank = cur_vblank; dev_priv->last_vblank_valid = 1; } - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) { DRM_DEBUG("US per vblank is: %u\n", dev_priv->usec_per_vblank); } @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I for (i = 0; i < dev_priv->num_irqs; ++i) { if (status & cur_irq->pending_mask) { - atomic_inc(&cur_irq->irq_received); + atomic_inc_unchecked(&cur_irq->irq_received); DRM_WAKEUP(&cur_irq->irq_queue); handled = 1; if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, ((VIA_READ(masks[irq][2]) & masks[irq][3]) == masks[irq][4])); - cur_irq_sequence = atomic_read(&cur_irq->irq_received); + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received); } else { DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, (((cur_irq_sequence = - atomic_read(&cur_irq->irq_received)) - + atomic_read_unchecked(&cur_irq->irq_received)) - *sequence) <= (1 << 23))); } *sequence = cur_irq_sequence; @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr } for (i = 0; i < dev_priv->num_irqs; ++i) { - atomic_set(&cur_irq->irq_received, 0); + atomic_set_unchecked(&cur_irq->irq_received, 0); cur_irq->enable_mask = dev_priv->irq_masks[i][0]; cur_irq->pending_mask = dev_priv->irq_masks[i][1]; DRM_INIT_WAITQUEUE(&cur_irq->irq_queue); @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { case VIA_IRQ_RELATIVE: irqwait->request.sequence += - atomic_read(&cur_irq->irq_received); + atomic_read_unchecked(&cur_irq->irq_received); irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; case VIA_IRQ_ABSOLUTE: break; diff -urNp linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h --- linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-05-22 19:36:31.000000000 -0400 @@ -240,7 +240,7 @@ struct vmw_private { * Fencing and IRQs. 
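The via_irq hunks above follow the pattern used throughout this patch for counters that are pure statistics or sequence numbers: a vblank count or per-IRQ sequence may legitimately wrap, so under PaX's REFCOUNT hardening it is switched from atomic_t to atomic_unchecked_t and accessed through the *_unchecked helpers, which do not trigger the overflow check added for reference counters. A minimal usage sketch, assuming the atomic_unchecked_t type and helpers that this patch introduces in the arch atomic headers; the counter and handler names below are made up for illustration:

#include <asm/atomic.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

/* Wraps by design: a modular vblank count, not an object refcount,
 * so the PAX_REFCOUNT overflow trap must not fire on it. */
static atomic_unchecked_t vbl_received = ATOMIC_INIT(0);

static irqreturn_t example_vblank_irq(int irq, void *arg)
{
	atomic_inc_unchecked(&vbl_received);
	/* Only the low bits matter, used cyclically as in the via handler above. */
	if (!(atomic_read_unchecked(&vbl_received) & 0x0F))
		pr_debug("another 16 vblanks handled\n");
	return IRQ_HANDLED;
}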
*/ - atomic_t fence_seq; + atomic_unchecked_t fence_seq; wait_queue_head_t fence_queue; wait_queue_head_t fifo_queue; atomic_t fence_queue_waiters; diff -urNp linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c --- linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-05-22 19:36:31.000000000 -0400 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev while (!vmw_lag_lt(queue, us)) { spin_lock(&queue->lock); if (list_empty(&queue->head)) - sequence = atomic_read(&dev_priv->fence_seq); + sequence = atomic_read_unchecked(&dev_priv->fence_seq); else { fence = list_first_entry(&queue->head, struct vmw_fence, head); diff -urNp linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c --- linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-05-22 19:36:31.000000000 -0400 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de (unsigned int) min, (unsigned int) fifo->capabilities); - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence); + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence); iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); vmw_fence_queue_init(&fifo->fence_queue); return vmw_fifo_send_fence(dev_priv, &dummy); @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva fm = vmw_fifo_reserve(dev_priv, bytes); if (unlikely(fm == NULL)) { - *sequence = atomic_read(&dev_priv->fence_seq); + *sequence = atomic_read_unchecked(&dev_priv->fence_seq); ret = -ENOMEM; (void)vmw_fallback_wait(dev_priv, false, true, *sequence, false, 3*HZ); @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva } do { - *sequence = atomic_add_return(1, &dev_priv->fence_seq); + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq); } while (*sequence == 0); if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { @@ -534,7 +534,7 @@ static int vmw_fifo_vm_fault(struct vm_a return VM_FAULT_SIGBUS; } -static struct vm_operations_struct vmw_fifo_vm_ops = { +static const struct vm_operations_struct vmw_fifo_vm_ops = { .fault = vmw_fifo_vm_fault, .open = NULL, .close = NULL diff -urNp linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c --- linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-05-22 19:36:31.000000000 -0400 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva * emitted. Then the fence is stale and signaled. 
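The vmwgfx_fifo hunk above shows why fence_seq in particular must stay wrap-tolerant: new fence numbers are taken with atomic_add_return on a 32-bit counter and the code loops until the result is non-zero, i.e. it expects the counter to roll over and simply skips zero, which is reserved to mean "no fence". A hedged sketch of that allocation idiom; the function and variable names are illustrative, not taken from the driver:

#include <linux/types.h>
#include <asm/atomic.h>

static atomic_unchecked_t fence_seq = ATOMIC_INIT(0);

/* Returns the next fence sequence number.  Zero is reserved, so a
 * wrapped result of 0 is skipped rather than treated as an error. */
static u32 example_next_fence_seq(void)
{
	u32 seq;

	do {
		seq = atomic_add_return_unchecked(1, &fence_seq);
	} while (seq == 0);

	return seq;
}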
*/ - ret = ((atomic_read(&dev_priv->fence_seq) - sequence) + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence) > VMW_FENCE_WRAP); return ret; @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private if (fifo_idle) down_read(&fifo_state->rwsem); - signal_seq = atomic_read(&dev_priv->fence_seq); + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq); ret = 0; for (;;) { diff -urNp linux-2.6.39.1/drivers/gpu/vga/vga_switcheroo.c linux-2.6.39.1/drivers/gpu/vga/vga_switcheroo.c --- linux-2.6.39.1/drivers/gpu/vga/vga_switcheroo.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/gpu/vga/vga_switcheroo.c 2011-05-22 19:36:31.000000000 -0400 @@ -53,7 +53,7 @@ struct vgasr_priv { int registered_clients; struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS]; - struct vga_switcheroo_handler *handler; + const struct vga_switcheroo_handler *handler; }; static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv); @@ -62,7 +62,7 @@ static void vga_switcheroo_debugfs_fini( /* only one switcheroo per system */ static struct vgasr_priv vgasr_priv; -int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) +int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { mutex_lock(&vgasr_mutex); if (vgasr_priv.handler) { diff -urNp linux-2.6.39.1/drivers/hid/hid-core.c linux-2.6.39.1/drivers/hid/hid-core.c --- linux-2.6.39.1/drivers/hid/hid-core.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/hid/hid-core.c 2011-05-22 19:36:31.000000000 -0400 @@ -1888,7 +1888,7 @@ static bool hid_ignore(struct hid_device int hid_add_device(struct hid_device *hdev) { - static atomic_t id = ATOMIC_INIT(0); + static atomic_unchecked_t id = ATOMIC_INIT(0); int ret; if (WARN_ON(hdev->status & HID_STAT_ADDED)) @@ -1903,7 +1903,7 @@ int hid_add_device(struct hid_device *hd /* XXX hack, any other cleaner solution after the driver core * is converted to allow more than 20 bytes as the device name? 
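A second recurring pattern in these hunks is const-ifying method tables: the nouveau and radeon vga_switcheroo handlers, the picolcd lcd_ops and the long run of i2c_algorithm tables further down are function-pointer structures that are filled in at compile time and never modified, so marking them const lets the compiler place them in read-only memory and removes them as targets for runtime function-pointer overwrites. Registration interfaces are adjusted to take a pointer-to-const, as vga_switcheroo_register_handler is above. A small sketch of the idiom with made-up names:

#include <linux/kernel.h>
#include <linux/errno.h>

struct example_ops {
	int (*start)(void *ctx);
	void (*stop)(void *ctx);
};

static int example_start(void *ctx) { return 0; }
static void example_stop(void *ctx) { }

/* Once const, the table lives in .rodata and can no longer be
 * rewritten at run time, only pointed at. */
static const struct example_ops example_ops = {
	.start = example_start,
	.stop  = example_stop,
};

/* Consumers only ever read through the table, so registration
 * takes a pointer-to-const as well. */
static const struct example_ops *registered_ops;

static int example_register(const struct example_ops *ops)
{
	if (registered_ops)
		return -EBUSY;
	registered_ops = ops;
	return 0;
}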
*/ dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, - hdev->vendor, hdev->product, atomic_inc_return(&id)); + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id)); hid_debug_register(hdev, dev_name(&hdev->dev)); ret = device_add(&hdev->dev); diff -urNp linux-2.6.39.1/drivers/hid/hid-picolcd.c linux-2.6.39.1/drivers/hid/hid-picolcd.c --- linux-2.6.39.1/drivers/hid/hid-picolcd.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/hid/hid-picolcd.c 2011-05-22 19:36:31.000000000 -0400 @@ -1037,7 +1037,7 @@ static int picolcd_check_lcd_fb(struct l return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev)); } -static struct lcd_ops picolcd_lcdops = { +static const struct lcd_ops picolcd_lcdops = { .get_contrast = picolcd_get_contrast, .set_contrast = picolcd_set_contrast, .check_fb = picolcd_check_lcd_fb, diff -urNp linux-2.6.39.1/drivers/hid/usbhid/hiddev.c linux-2.6.39.1/drivers/hid/usbhid/hiddev.c --- linux-2.6.39.1/drivers/hid/usbhid/hiddev.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/hid/usbhid/hiddev.c 2011-05-22 19:36:31.000000000 -0400 @@ -613,7 +613,7 @@ static long hiddev_ioctl(struct file *fi break; case HIDIOCAPPLICATION: - if (arg < 0 || arg >= hid->maxapplication) + if (arg >= hid->maxapplication) break; for (i = 0; i < hid->maxcollection; i++) diff -urNp linux-2.6.39.1/drivers/hwmon/ibmaem.c linux-2.6.39.1/drivers/hwmon/ibmaem.c --- linux-2.6.39.1/drivers/hwmon/ibmaem.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/hwmon/ibmaem.c 2011-05-22 19:36:31.000000000 -0400 @@ -238,7 +238,7 @@ struct aem_read_sensor_resp { struct aem_driver_data { struct list_head aem_devices; struct ipmi_smi_watcher bmc_events; - struct ipmi_user_hndl ipmi_hndlrs; + const struct ipmi_user_hndl ipmi_hndlrs; }; static void aem_register_bmc(int iface, struct device *dev); diff -urNp linux-2.6.39.1/drivers/hwmon/ibmpex.c linux-2.6.39.1/drivers/hwmon/ibmpex.c --- linux-2.6.39.1/drivers/hwmon/ibmpex.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/hwmon/ibmpex.c 2011-05-22 19:36:31.000000000 -0400 @@ -110,7 +110,7 @@ struct ibmpex_bmc_data { struct ibmpex_driver_data { struct list_head bmc_data; struct ipmi_smi_watcher bmc_events; - struct ipmi_user_hndl ipmi_hndlrs; + const struct ipmi_user_hndl ipmi_hndlrs; }; static struct ibmpex_driver_data driver_data = { diff -urNp linux-2.6.39.1/drivers/hwmon/sht15.c linux-2.6.39.1/drivers/hwmon/sht15.c --- linux-2.6.39.1/drivers/hwmon/sht15.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/hwmon/sht15.c 2011-05-22 19:36:31.000000000 -0400 @@ -113,7 +113,7 @@ struct sht15_data { int supply_uV; int supply_uV_valid; struct work_struct update_supply_work; - atomic_t interrupt_handled; + atomic_unchecked_t interrupt_handled; }; /** @@ -246,13 +246,13 @@ static inline int sht15_update_single_va return ret; gpio_direction_input(data->pdata->gpio_data); - atomic_set(&data->interrupt_handled, 0); + atomic_set_unchecked(&data->interrupt_handled, 0); enable_irq(gpio_to_irq(data->pdata->gpio_data)); if (gpio_get_value(data->pdata->gpio_data) == 0) { disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); /* Only relevant if the interrupt hasn't occurred. 
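The hiddev change above removes a comparison that can never be true: the ioctl argument is an unsigned long, so "arg < 0" is always false and only the upper bound against hid->maxapplication does any work. A tiny sketch of the pitfall and the corrected check; the helper name and types are illustrative:

#include <linux/types.h>

/* 'arg' mirrors the unsigned ioctl argument, so a negative-value
 * test on it is dead code; the range check alone is sufficient. */
static bool example_app_index_valid(unsigned long arg, unsigned int maxapplication)
{
	/* Before: if (arg < 0 || arg >= maxapplication) -- the first
	 * clause is always false for an unsigned type.  After: */
	return arg < maxapplication;
}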
*/ - if (!atomic_read(&data->interrupt_handled)) + if (!atomic_read_unchecked(&data->interrupt_handled)) schedule_work(&data->read_work); } ret = wait_event_timeout(data->wait_queue, @@ -399,7 +399,7 @@ static irqreturn_t sht15_interrupt_fired struct sht15_data *data = d; /* First disable the interrupt */ disable_irq_nosync(irq); - atomic_inc(&data->interrupt_handled); + atomic_inc_unchecked(&data->interrupt_handled); /* Then schedule a reading work struct */ if (data->flag != SHT15_READING_NOTHING) schedule_work(&data->read_work); @@ -450,11 +450,11 @@ static void sht15_bh_read_data(struct wo here as could have gone low in meantime so verify it hasn't! */ - atomic_set(&data->interrupt_handled, 0); + atomic_set_unchecked(&data->interrupt_handled, 0); enable_irq(gpio_to_irq(data->pdata->gpio_data)); /* If still not occurred or another handler has been scheduled */ if (gpio_get_value(data->pdata->gpio_data) - || atomic_read(&data->interrupt_handled)) + || atomic_read_unchecked(&data->interrupt_handled)) return; } /* Read the data back from the device */ diff -urNp linux-2.6.39.1/drivers/hwmon/w83791d.c linux-2.6.39.1/drivers/hwmon/w83791d.c --- linux-2.6.39.1/drivers/hwmon/w83791d.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/hwmon/w83791d.c 2011-05-22 19:36:31.000000000 -0400 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli struct i2c_board_info *info); static int w83791d_remove(struct i2c_client *client); -static int w83791d_read(struct i2c_client *client, u8 register); -static int w83791d_write(struct i2c_client *client, u8 register, u8 value); +static int w83791d_read(struct i2c_client *client, u8 reg); +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value); static struct w83791d_data *w83791d_update_device(struct device *dev); #ifdef DEBUG diff -urNp linux-2.6.39.1/drivers/i2c/busses/i2c-at91.c linux-2.6.39.1/drivers/i2c/busses/i2c-at91.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-at91.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-at91.c 2011-05-22 19:36:31.000000000 -0400 @@ -181,7 +181,7 @@ static u32 at91_func(struct i2c_adapter return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm at91_algorithm = { +static const struct i2c_algorithm at91_algorithm = { .master_xfer = at91_xfer, .functionality = at91_func, }; diff -urNp linux-2.6.39.1/drivers/i2c/busses/i2c-bfin-twi.c linux-2.6.39.1/drivers/i2c/busses/i2c-bfin-twi.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-bfin-twi.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-bfin-twi.c 2011-05-22 19:36:31.000000000 -0400 @@ -599,7 +599,7 @@ static u32 bfin_twi_functionality(struct I2C_FUNC_I2C | I2C_FUNC_SMBUS_I2C_BLOCK; } -static struct i2c_algorithm bfin_twi_algorithm = { +static const struct i2c_algorithm bfin_twi_algorithm = { .master_xfer = bfin_twi_master_xfer, .smbus_xfer = bfin_twi_smbus_xfer, .functionality = bfin_twi_functionality, diff -urNp linux-2.6.39.1/drivers/i2c/busses/i2c-davinci.c linux-2.6.39.1/drivers/i2c/busses/i2c-davinci.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-davinci.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-davinci.c 2011-05-22 19:36:31.000000000 -0400 @@ -630,7 +630,7 @@ static inline void i2c_davinci_cpufreq_d } #endif -static struct i2c_algorithm i2c_davinci_algo = { +static const struct i2c_algorithm i2c_davinci_algo = { .master_xfer = i2c_davinci_xfer, .functionality = i2c_davinci_func, }; diff -urNp 
linux-2.6.39.1/drivers/i2c/busses/i2c-designware.c linux-2.6.39.1/drivers/i2c/busses/i2c-designware.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-designware.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-designware.c 2011-05-22 19:36:31.000000000 -0400 @@ -689,7 +689,7 @@ tx_aborted: return IRQ_HANDLED; } -static struct i2c_algorithm i2c_dw_algo = { +static const struct i2c_algorithm i2c_dw_algo = { .master_xfer = i2c_dw_xfer, .functionality = i2c_dw_func, }; diff -urNp linux-2.6.39.1/drivers/i2c/busses/i2c-eg20t.c linux-2.6.39.1/drivers/i2c/busses/i2c-eg20t.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-eg20t.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-eg20t.c 2011-05-22 19:36:31.000000000 -0400 @@ -708,7 +708,7 @@ static u32 pch_i2c_func(struct i2c_adapt return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR; } -static struct i2c_algorithm pch_algorithm = { +static const struct i2c_algorithm pch_algorithm = { .master_xfer = pch_i2c_xfer, .functionality = pch_i2c_func }; diff -urNp linux-2.6.39.1/drivers/i2c/busses/i2c-imx.c linux-2.6.39.1/drivers/i2c/busses/i2c-imx.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-imx.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-imx.c 2011-05-22 19:36:31.000000000 -0400 @@ -457,7 +457,7 @@ static u32 i2c_imx_func(struct i2c_adapt return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm i2c_imx_algo = { +static const struct i2c_algorithm i2c_imx_algo = { .master_xfer = i2c_imx_xfer, .functionality = i2c_imx_func, }; diff -urNp linux-2.6.39.1/drivers/i2c/busses/i2c-intel-mid.c linux-2.6.39.1/drivers/i2c/busses/i2c-intel-mid.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-intel-mid.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-intel-mid.c 2011-05-22 19:36:31.000000000 -0400 @@ -917,7 +917,7 @@ err: return IRQ_HANDLED; } -static struct i2c_algorithm intel_mid_i2c_algorithm = { +static const struct i2c_algorithm intel_mid_i2c_algorithm = { .master_xfer = intel_mid_i2c_xfer, .functionality = intel_mid_i2c_func, }; diff -urNp linux-2.6.39.1/drivers/i2c/busses/i2c-nforce2.c linux-2.6.39.1/drivers/i2c/busses/i2c-nforce2.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-nforce2.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-nforce2.c 2011-05-22 19:36:31.000000000 -0400 @@ -303,7 +303,7 @@ static u32 nforce2_func(struct i2c_adapt I2C_FUNC_SMBUS_BLOCK_DATA : 0); } -static struct i2c_algorithm smbus_algorithm = { +static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = nforce2_access, .functionality = nforce2_func, }; diff -urNp linux-2.6.39.1/drivers/i2c/busses/i2c-pmcmsp.c linux-2.6.39.1/drivers/i2c/busses/i2c-pmcmsp.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-pmcmsp.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-pmcmsp.c 2011-05-22 19:36:31.000000000 -0400 @@ -615,7 +615,7 @@ static u32 pmcmsptwi_i2c_func(struct i2c /* -- Initialization -- */ -static struct i2c_algorithm pmcmsptwi_algo = { +static const struct i2c_algorithm pmcmsptwi_algo = { .master_xfer = pmcmsptwi_master_xfer, .functionality = pmcmsptwi_i2c_func, }; diff -urNp linux-2.6.39.1/drivers/i2c/busses/i2c-pnx.c linux-2.6.39.1/drivers/i2c/busses/i2c-pnx.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-pnx.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-pnx.c 2011-05-22 19:36:31.000000000 -0400 @@ -535,7 +535,7 @@ static u32 i2c_pnx_func(struct i2c_adapt return 
I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm pnx_algorithm = { +static const struct i2c_algorithm pnx_algorithm = { .master_xfer = i2c_pnx_xfer, .functionality = i2c_pnx_func, }; diff -urNp linux-2.6.39.1/drivers/i2c/busses/i2c-puv3.c linux-2.6.39.1/drivers/i2c/busses/i2c-puv3.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-puv3.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-puv3.c 2011-05-22 19:36:31.000000000 -0400 @@ -176,7 +176,7 @@ static u32 puv3_i2c_func(struct i2c_adap return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm puv3_i2c_algorithm = { +static const struct i2c_algorithm puv3_i2c_algorithm = { .master_xfer = puv3_i2c_xfer, .functionality = puv3_i2c_func, }; diff -urNp linux-2.6.39.1/drivers/i2c/busses/i2c-s6000.c linux-2.6.39.1/drivers/i2c/busses/i2c-s6000.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-s6000.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-s6000.c 2011-05-22 19:36:31.000000000 -0400 @@ -243,7 +243,7 @@ static u32 s6i2c_functionality(struct i2 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm s6i2c_algorithm = { +static const struct i2c_algorithm s6i2c_algorithm = { .master_xfer = s6i2c_master_xfer, .functionality = s6i2c_functionality, }; diff -urNp linux-2.6.39.1/drivers/i2c/busses/i2c-sh_mobile.c linux-2.6.39.1/drivers/i2c/busses/i2c-sh_mobile.c --- linux-2.6.39.1/drivers/i2c/busses/i2c-sh_mobile.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/i2c/busses/i2c-sh_mobile.c 2011-05-22 19:36:31.000000000 -0400 @@ -529,7 +529,7 @@ static u32 sh_mobile_i2c_func(struct i2c return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } -static struct i2c_algorithm sh_mobile_i2c_algorithm = { +static const struct i2c_algorithm sh_mobile_i2c_algorithm = { .functionality = sh_mobile_i2c_func, .master_xfer = sh_mobile_i2c_xfer, }; diff -urNp linux-2.6.39.1/drivers/ide/ide-cd.c linux-2.6.39.1/drivers/ide/ide-cd.c --- linux-2.6.39.1/drivers/ide/ide-cd.c 2011-06-03 00:04:14.000000000 -0400 +++ linux-2.6.39.1/drivers/ide/ide-cd.c 2011-06-03 00:32:05.000000000 -0400 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_ alignment = queue_dma_alignment(q) | q->dma_pad_mask; if ((unsigned long)buf & alignment || blk_rq_bytes(rq) & q->dma_pad_mask - || object_is_on_stack(buf)) + || object_starts_on_stack(buf)) drive->dma = 0; } } diff -urNp linux-2.6.39.1/drivers/ide/ide-floppy.c linux-2.6.39.1/drivers/ide/ide-floppy.c --- linux-2.6.39.1/drivers/ide/ide-floppy.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ide/ide-floppy.c 2011-05-22 19:36:31.000000000 -0400 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d u8 pc_buf[256], header_len, desc_cnt; int i, rc = 1, blocks, length; + pax_track_stack(); + ide_debug_log(IDE_DBG_FUNC, "enter"); drive->bios_cyl = 0; diff -urNp linux-2.6.39.1/drivers/ide/it821x.c linux-2.6.39.1/drivers/ide/it821x.c --- linux-2.6.39.1/drivers/ide/it821x.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ide/it821x.c 2011-05-22 19:36:31.000000000 -0400 @@ -508,7 +508,7 @@ static void it821x_quirkproc(ide_drive_t } -static struct ide_dma_ops it821x_pass_through_dma_ops = { +static const struct ide_dma_ops it821x_pass_through_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = it821x_dma_start, diff -urNp linux-2.6.39.1/drivers/ide/setup-pci.c linux-2.6.39.1/drivers/ide/setup-pci.c --- linux-2.6.39.1/drivers/ide/setup-pci.c 2011-05-19 00:06:34.000000000 
-0400 +++ linux-2.6.39.1/drivers/ide/setup-pci.c 2011-05-22 19:36:31.000000000 -0400 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev int ret, i, n_ports = dev2 ? 4 : 2; struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; + pax_track_stack(); + for (i = 0; i < n_ports / 2; i++) { ret = ide_setup_pci_controller(pdev[i], d, !i); if (ret < 0) diff -urNp linux-2.6.39.1/drivers/ide/trm290.c linux-2.6.39.1/drivers/ide/trm290.c --- linux-2.6.39.1/drivers/ide/trm290.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/ide/trm290.c 2011-05-22 19:36:31.000000000 -0400 @@ -314,7 +314,7 @@ static const struct ide_tp_ops trm290_tp .output_data = ide_output_data, }; -static struct ide_dma_ops trm290_dma_ops = { +static const struct ide_dma_ops trm290_dma_ops = { .dma_host_set = trm290_dma_host_set, .dma_setup = trm290_dma_setup, .dma_start = trm290_dma_start, diff -urNp linux-2.6.39.1/drivers/infiniband/core/cm.c linux-2.6.39.1/drivers/infiniband/core/cm.c --- linux-2.6.39.1/drivers/infiniband/core/cm.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/infiniband/core/cm.c 2011-05-22 19:36:31.000000000 -0400 @@ -113,7 +113,7 @@ static char const counter_group_names[CM struct cm_counter_group { struct kobject obj; - atomic_long_t counter[CM_ATTR_COUNT]; + atomic_long_unchecked_t counter[CM_ATTR_COUNT]; }; struct cm_counter_attribute { @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm struct ib_mad_send_buf *msg = NULL; int ret; - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_REQ_COUNTER]); /* Quick state check to discard duplicate REQs. */ @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm if (!cm_id_priv) return; - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_REP_COUNTER]); ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); if (ret) @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work if (cm_id_priv->id.state != IB_CM_REP_SENT && cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { spin_unlock_irq(&cm_id_priv->lock); - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_RTU_COUNTER]); goto out; } @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, dreq_msg->local_comm_id); if (!cm_id_priv) { - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); cm_issue_drep(work->port, work->mad_recv_wc); return -EINVAL; @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor case IB_CM_MRA_REP_RCVD: break; case IB_CM_TIMEWAIT: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_DREQ_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor cm_free_msg(msg); goto deref; case IB_CM_DREQ_RCVD: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. 
counter[CM_DREQ_COUNTER]); goto unlock; default: @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work ib_modify_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg, timeout)) { if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) - atomic_long_inc(&work->port-> + atomic_long_inc_unchecked(&work->port-> counter_group[CM_RECV_DUPLICATES]. counter[CM_MRA_COUNTER]); goto out; @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work break; case IB_CM_MRA_REQ_RCVD: case IB_CM_MRA_REP_RCVD: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_MRA_COUNTER]); /* fall through */ default: @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work case IB_CM_LAP_IDLE: break; case IB_CM_MRA_LAP_SENT: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_LAP_COUNTER]); if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) goto unlock; @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work cm_free_msg(msg); goto deref; case IB_CM_LAP_RCVD: - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_LAP_COUNTER]); goto unlock; default: @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); if (cur_cm_id_priv) { spin_unlock_irq(&cm.lock); - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. counter[CM_SIDR_REQ_COUNTER]); goto out; /* Duplicate message. */ } @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) msg->retries = 1; - atomic_long_add(1 + msg->retries, + atomic_long_add_unchecked(1 + msg->retries, &port->counter_group[CM_XMIT].counter[attr_index]); if (msg->retries) - atomic_long_add(msg->retries, + atomic_long_add_unchecked(msg->retries, &port->counter_group[CM_XMIT_RETRIES]. counter[attr_index]); @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma } attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); - atomic_long_inc(&port->counter_group[CM_RECV]. + atomic_long_inc_unchecked(&port->counter_group[CM_RECV]. 
counter[attr_id - CM_ATTR_ID_OFFSET]); work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko cm_attr = container_of(attr, struct cm_counter_attribute, attr); return sprintf(buf, "%ld\n", - atomic_long_read(&group->counter[cm_attr->index])); + atomic_long_read_unchecked(&group->counter[cm_attr->index])); } static const struct sysfs_ops cm_counter_ops = { diff -urNp linux-2.6.39.1/drivers/infiniband/core/fmr_pool.c linux-2.6.39.1/drivers/infiniband/core/fmr_pool.c --- linux-2.6.39.1/drivers/infiniband/core/fmr_pool.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/infiniband/core/fmr_pool.c 2011-05-22 19:36:31.000000000 -0400 @@ -97,8 +97,8 @@ struct ib_fmr_pool { struct task_struct *thread; - atomic_t req_ser; - atomic_t flush_ser; + atomic_unchecked_t req_ser; + atomic_unchecked_t flush_ser; wait_queue_head_t force_wait; }; @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p struct ib_fmr_pool *pool = pool_ptr; do { - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) { + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) { ib_fmr_batch_release(pool); - atomic_inc(&pool->flush_ser); + atomic_inc_unchecked(&pool->flush_ser); wake_up_interruptible(&pool->force_wait); if (pool->flush_function) @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p } set_current_state(TASK_INTERRUPTIBLE); - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 && + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 && !kthread_should_stop()) schedule(); __set_current_state(TASK_RUNNING); @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s pool->dirty_watermark = params->dirty_watermark; pool->dirty_len = 0; spin_lock_init(&pool->pool_lock); - atomic_set(&pool->req_ser, 0); - atomic_set(&pool->flush_ser, 0); + atomic_set_unchecked(&pool->req_ser, 0); + atomic_set_unchecked(&pool->flush_ser, 0); init_waitqueue_head(&pool->force_wait); pool->thread = kthread_run(ib_fmr_cleanup_thread, @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool } spin_unlock_irq(&pool->pool_lock); - serial = atomic_inc_return(&pool->req_ser); + serial = atomic_inc_return_unchecked(&pool->req_ser); wake_up_process(pool->thread); if (wait_event_interruptible(pool->force_wait, - atomic_read(&pool->flush_ser) - serial >= 0)) + atomic_read_unchecked(&pool->flush_ser) - serial >= 0)) return -EINTR; return 0; @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr } else { list_add_tail(&fmr->list, &pool->dirty_list); if (++pool->dirty_len >= pool->dirty_watermark) { - atomic_inc(&pool->req_ser); + atomic_inc_unchecked(&pool->req_ser); wake_up_process(pool->thread); } } diff -urNp linux-2.6.39.1/drivers/infiniband/hw/cxgb4/mem.c linux-2.6.39.1/drivers/infiniband/hw/cxgb4/mem.c --- linux-2.6.39.1/drivers/infiniband/hw/cxgb4/mem.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/infiniband/hw/cxgb4/mem.c 2011-05-22 19:36:31.000000000 -0400 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r int err; struct fw_ri_tpte tpt; u32 stag_idx; - static atomic_t key; + static atomic_unchecked_t key; if (c4iw_fatal_error(rdev)) return -EIO; @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r &rdev->resource.tpt_fifo_lock); if (!stag_idx) return -ENOMEM; - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff); + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff); } 
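The fmr_pool hunks above convert req_ser and flush_ser for the same reason as the fence counters: the cleanup thread decides whether a flush is still outstanding by looking at the sign of the difference between the two serials, an ordering test that keeps working across 32-bit wraparound as long as the serials stay within 2^31 of each other, which is exactly why the underlying counters must be free to wrap without tripping an overflow check. A hedged sketch of that comparison idiom; the helper name is illustrative:

#include <linux/types.h>

/* Wrap-safe ordering test for two 32-bit serial numbers: the signed
 * difference gives the right answer as long as the serials are less
 * than 2^31 apart, the same idiom as the kernel's time_before(). */
static bool example_serial_before(u32 a, u32 b)
{
	return (s32)(a - b) < 0;
}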
PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", __func__, stag_state, type, pdid, stag_idx); diff -urNp linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_dma.c linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_dma.c --- linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_dma.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_dma.c 2011-05-22 19:36:31.000000000 -0400 @@ -175,7 +175,7 @@ static void ipath_dma_free_coherent(stru free_pages((unsigned long) cpu_addr, get_order(size)); } -struct ib_dma_mapping_ops ipath_dma_mapping_ops = { +const struct ib_dma_mapping_ops ipath_dma_mapping_ops = { ipath_mapping_error, ipath_dma_map_single, ipath_dma_unmap_single, diff -urNp linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_fs.c --- linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-22 19:36:31.000000000 -0400 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru struct infinipath_counters counters; struct ipath_devdata *dd; + pax_track_stack(); + dd = file->f_path.dentry->d_inode->i_private; dd->ipath_f_read_counters(dd, &counters); diff -urNp linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_rc.c linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_rc.c --- linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_rc.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_rc.c 2011-05-22 19:36:31.000000000 -0400 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de struct ib_atomic_eth *ateth; struct ipath_ack_entry *e; u64 vaddr; - atomic64_t *maddr; + atomic64_unchecked_t *maddr; u64 sdata; u32 rkey; u8 next; @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de IB_ACCESS_REMOTE_ATOMIC))) goto nack_acc_unlck; /* Perform atomic OP and save result. */ - maddr = (atomic64_t *) qp->r_sge.sge.vaddr; + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr; sdata = be64_to_cpu(ateth->swap_data); e = &qp->s_ack_queue[qp->r_head_ack_queue]; e->atomic_data = (opcode == OP(FETCH_ADD)) ? - (u64) atomic64_add_return(sdata, maddr) - sdata : + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata : (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, be64_to_cpu(ateth->compare_data), sdata); diff -urNp linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_ruc.c linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_ruc.c --- linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-05-22 19:36:31.000000000 -0400 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip unsigned long flags; struct ib_wc wc; u64 sdata; - atomic64_t *maddr; + atomic64_unchecked_t *maddr; enum ib_wc_status send_status; /* @@ -382,11 +382,11 @@ again: IB_ACCESS_REMOTE_ATOMIC))) goto acc_err; /* Perform atomic OP and save result. */ - maddr = (atomic64_t *) qp->r_sge.sge.vaddr; + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr; sdata = wqe->wr.wr.atomic.compare_add; *(u64 *) sqp->s_sge.sge.vaddr = (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? 
- (u64) atomic64_add_return(sdata, maddr) - sdata : + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata : (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, sdata, wqe->wr.wr.atomic.swap); goto send_comp; diff -urNp linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_verbs.h linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_verbs.h --- linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_verbs.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/infiniband/hw/ipath/ipath_verbs.h 2011-05-22 19:36:31.000000000 -0400 @@ -931,6 +931,6 @@ extern unsigned int ib_ipath_max_srq_wrs extern const u32 ib_ipath_rnr_table[]; -extern struct ib_dma_mapping_ops ipath_dma_mapping_ops; +extern const struct ib_dma_mapping_ops ipath_dma_mapping_ops; #endif /* IPATH_VERBS_H */ diff -urNp linux-2.6.39.1/drivers/infiniband/hw/nes/nes.c linux-2.6.39.1/drivers/infiniband/hw/nes/nes.c --- linux-2.6.39.1/drivers/infiniband/hw/nes/nes.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/infiniband/hw/nes/nes.c 2011-05-22 19:36:31.000000000 -0400 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi LIST_HEAD(nes_adapter_list); static LIST_HEAD(nes_dev_list); -atomic_t qps_destroyed; +atomic_unchecked_t qps_destroyed; static unsigned int ee_flsh_adapter; static unsigned int sysfs_nonidx_addr; @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str struct nes_qp *nesqp = cqp_request->cqp_callback_pointer; struct nes_adapter *nesadapter = nesdev->nesadapter; - atomic_inc(&qps_destroyed); + atomic_inc_unchecked(&qps_destroyed); /* Free the control structures */ diff -urNp linux-2.6.39.1/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.39.1/drivers/infiniband/hw/nes/nes_cm.c --- linux-2.6.39.1/drivers/infiniband/hw/nes/nes_cm.c 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/infiniband/hw/nes/nes_cm.c 2011-05-22 19:36:31.000000000 -0400 @@ -68,14 +68,14 @@ u32 cm_packets_dropped; u32 cm_packets_retrans; u32 cm_packets_created; u32 cm_packets_received; -atomic_t cm_listens_created; -atomic_t cm_listens_destroyed; +atomic_unchecked_t cm_listens_created; +atomic_unchecked_t cm_listens_destroyed; u32 cm_backlog_drops; -atomic_t cm_loopbacks; -atomic_t cm_nodes_created; -atomic_t cm_nodes_destroyed; -atomic_t cm_accel_dropped_pkts; -atomic_t cm_resets_recvd; +atomic_unchecked_t cm_loopbacks; +atomic_unchecked_t cm_nodes_created; +atomic_unchecked_t cm_nodes_destroyed; +atomic_unchecked_t cm_accel_dropped_pkts; +atomic_unchecked_t cm_resets_recvd; static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *); @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = { static struct nes_cm_core *g_cm_core; -atomic_t cm_connects; -atomic_t cm_accepts; -atomic_t cm_disconnects; -atomic_t cm_closes; -atomic_t cm_connecteds; -atomic_t cm_connect_reqs; -atomic_t cm_rejects; +atomic_unchecked_t cm_connects; +atomic_unchecked_t cm_accepts; +atomic_unchecked_t cm_disconnects; +atomic_unchecked_t cm_closes; +atomic_unchecked_t cm_connecteds; +atomic_unchecked_t cm_connect_reqs; +atomic_unchecked_t cm_rejects; /** @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str kfree(listener); listener = NULL; ret = 0; - atomic_inc(&cm_listens_destroyed); + atomic_inc_unchecked(&cm_listens_destroyed); } else { spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); } @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node( cm_node->rem_mac); add_hte_node(cm_core, cm_node); - atomic_inc(&cm_nodes_created); + atomic_inc_unchecked(&cm_nodes_created); return cm_node; } @@ 
-1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm } atomic_dec(&cm_core->node_cnt); - atomic_inc(&cm_nodes_destroyed); + atomic_inc_unchecked(&cm_nodes_destroyed); nesqp = cm_node->nesqp; if (nesqp) { nesqp->cm_node = NULL; @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm static void drop_packet(struct sk_buff *skb) { - atomic_inc(&cm_accel_dropped_pkts); + atomic_inc_unchecked(&cm_accel_dropped_pkts); dev_kfree_skb_any(skb); } @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm { int reset = 0; /* whether to send reset in case of err.. */ - atomic_inc(&cm_resets_recvd); + atomic_inc_unchecked(&cm_resets_recvd); nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u." " refcnt=%d\n", cm_node, cm_node->state, atomic_read(&cm_node->ref_count)); @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne rem_ref_cm_node(cm_node->cm_core, cm_node); return NULL; } - atomic_inc(&cm_loopbacks); + atomic_inc_unchecked(&cm_loopbacks); loopbackremotenode->loopbackpartner = cm_node; loopbackremotenode->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c add_ref_cm_node(cm_node); } else if (cm_node->state == NES_CM_STATE_TSA) { rem_ref_cm_node(cm_core, cm_node); - atomic_inc(&cm_accel_dropped_pkts); + atomic_inc_unchecked(&cm_accel_dropped_pkts); dev_kfree_skb_any(skb); break; } @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne if ((cm_id) && (cm_id->event_handler)) { if (issue_disconn) { - atomic_inc(&cm_disconnects); + atomic_inc_unchecked(&cm_disconnects); cm_event.event = IW_CM_EVENT_DISCONNECT; cm_event.status = disconn_status; cm_event.local_addr = cm_id->local_addr; @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne } if (issue_close) { - atomic_inc(&cm_closes); + atomic_inc_unchecked(&cm_closes); nes_disconnect(nesqp, 1); cm_id->provider_data = nesqp; @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n", nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener); - atomic_inc(&cm_accepts); + atomic_inc_unchecked(&cm_accepts); nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", netdev_refcnt_read(nesvnic->netdev)); @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c struct nes_cm_core *cm_core; - atomic_inc(&cm_rejects); + atomic_inc_unchecked(&cm_rejects); cm_node = (struct nes_cm_node *) cm_id->provider_data; loopback = cm_node->loopbackpartner; cm_core = cm_node->cm_core; @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id, ntohl(cm_id->local_addr.sin_addr.s_addr), ntohs(cm_id->local_addr.sin_port)); - atomic_inc(&cm_connects); + atomic_inc_unchecked(&cm_connects); nesqp->active_conn = 1; /* cache the cm_id in the qp */ @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); return err; } - atomic_inc(&cm_listens_created); + atomic_inc_unchecked(&cm_listens_created); } cm_id->add_ref(cm_id); @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne if (nesqp->destroyed) { return; } - atomic_inc(&cm_connecteds); + atomic_inc_unchecked(&cm_connecteds); nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on" " local port 0x%04X. 
jiffies = %lu.\n", nesqp->hwqp.qp_id, @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm cm_id->add_ref(cm_id); ret = cm_id->event_handler(cm_id, &cm_event); - atomic_inc(&cm_closes); + atomic_inc_unchecked(&cm_closes); cm_event.event = IW_CM_EVENT_CLOSE; cm_event.status = IW_CM_EVENT_STATUS_OK; cm_event.provider_data = cm_id->provider_data; @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_ return; cm_id = cm_node->cm_id; - atomic_inc(&cm_connect_reqs); + atomic_inc_unchecked(&cm_connect_reqs); nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", cm_node, cm_id, jiffies); @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n return; cm_id = cm_node->cm_id; - atomic_inc(&cm_connect_reqs); + atomic_inc_unchecked(&cm_connect_reqs); nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n", cm_node, cm_id, jiffies); diff -urNp linux-2.6.39.1/drivers/infiniband/hw/nes/nes.h linux-2.6.39.1/drivers/infiniband/hw/nes/nes.h --- linux-2.6.39.1/drivers/infiniband/hw/nes/nes.h 2011-05-19 00:06:34.000000000 -0400 +++ linux-2.6.39.1/drivers/infiniband/hw/nes/nes.h 2011-05-22 19:36:31.000000000 -0400 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level; extern unsigned int wqm_quanta; extern struct list_head nes_adapter_list; -extern atomic_t cm_connects; -extern atomic_t cm_accepts; -extern atomic_t cm_disconnects; -extern atomic_t cm_closes; -extern atomic_t cm_connecteds; -extern atomic_t cm_connect_reqs; -extern atomic_t cm_rejects; -extern atomic_t mod_qp_timouts; -extern atomic_t qps_created; -extern atomic_t qps_destroyed; -extern atomic_t sw_qps_destroyed; +extern atomic_unchecked_t cm_connects; +extern atomic_unchecked_t cm_accepts; +extern atomic_unchecked_t cm_disconnects; +extern atomic_unchecked_t cm_closes; +extern atomic_unchecked_t cm_connecteds; +extern atomic_unchecked_t cm_connect_reqs; +extern atomic_unchecked_t cm_rejects; +extern atomic_unchecked_t mod_qp_timouts; +extern atomic_unchecked_t qps_created; +extern atomic_unchecked_t qps_destroyed; +extern atomic_unchecked_t sw_qps_destroyed; extern u32 mh_detected; extern u32 mh_pauses_sent; extern u32 cm_packets_sent; @@ -194,14 +194,14 @@ extern u32 cm_packets_created; extern u32 cm_packets_received; extern u32 cm_packets_dropped; extern u32 cm_packets_retrans; -extern atomic_t cm_listens_created; -extern atomic_t cm_listens_destroyed; +extern atomic_unchecked_t cm_listens_created; +extern atomic_unchecked_t cm_listens_destroyed; extern u32 cm_backlog_drops; -extern atomic_t cm_loopbacks; -extern atomic_t cm_nodes_created; -extern atomic_t cm_nodes_destroyed; -extern atomic_t cm_accel_dropped_pkts; -extern atomic_t cm_rese