Commit aafe4dbed0bf6cbdb2e9f03e1d42f8a540d8541d
Committed by
Arnd Bergmann
1 parent
9858c60cc2
Exists in
master
and in
4 other branches
asm-generic: add generic versions of common headers
These are all kernel internal interfaces that get copied around a lot. In most cases, architectures can provide their own optimized versions, but these generic versions can work as well. I have tried to use the most common contents of each header to allow existing architectures to migrate easily. Thanks to Remis for suggesting a number of cleanups. Signed-off-by: Remis Lima Baima <remis.developer@googlemail.com> Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Showing 18 changed files with 558 additions and 0 deletions Side-by-side Diff
- include/asm-generic/bugs.h
- include/asm-generic/current.h
- include/asm-generic/delay.h
- include/asm-generic/fb.h
- include/asm-generic/hardirq.h
- include/asm-generic/irq.h
- include/asm-generic/irqflags.h
- include/asm-generic/kmap_types.h
- include/asm-generic/linkage.h
- include/asm-generic/module.h
- include/asm-generic/mutex.h
- include/asm-generic/scatterlist.h
- include/asm-generic/spinlock.h
- include/asm-generic/string.h
- include/asm-generic/syscalls.h
- include/asm-generic/system.h
- include/asm-generic/unaligned.h
- include/asm-generic/user.h
include/asm-generic/bugs.h
include/asm-generic/current.h
include/asm-generic/delay.h
include/asm-generic/fb.h
include/asm-generic/hardirq.h
#ifndef __ASM_GENERIC_HARDIRQ_H
#define __ASM_GENERIC_HARDIRQ_H

#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/irq.h>

/*
 * Per-CPU interrupt statistics.  Cacheline-aligned so that each CPU's
 * counters sit in their own cacheline and updates from different CPUs
 * do not false-share.
 */
typedef struct {
	unsigned long __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

/*
 * Number of bits reserved in the preempt counter for hardirq nesting.
 * An architecture may define a larger value before including this file.
 */
#ifndef HARDIRQ_BITS
#define HARDIRQ_BITS	8
#endif

/*
 * The hardirq mask has to be large enough to have
 * space for potentially all IRQ sources in the system
 * nesting on a single CPU:
 */
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif

/*
 * Called when an interrupt arrives for a vector that has no handler.
 * Architectures with an interrupt controller that needs explicit
 * acknowledgement override this by defining ack_bad_irq themselves.
 */
#ifndef ack_bad_irq
static inline void ack_bad_irq(unsigned int irq)
{
	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
}
#endif

#endif /* __ASM_GENERIC_HARDIRQ_H */
include/asm-generic/irq.h
#ifndef __ASM_GENERIC_IRQ_H
#define __ASM_GENERIC_IRQ_H

/*
 * NR_IRQS bounds the number of interrupts the platform can handle.
 * It sizes the static irq_map array, so keep it reasonably small.
 * An architecture may override it before including this header.
 */
#ifndef NR_IRQS
#define NR_IRQS 64
#endif

/*
 * Map an IRQ number to its canonical form.  The generic version is the
 * identity mapping; architectures with aliased interrupt lines (e.g.
 * cascaded legacy PICs) provide their own translation.
 */
static inline int irq_canonicalize(int irq)
{
	int canonical = irq;

	return canonical;
}

#endif /* __ASM_GENERIC_IRQ_H */
include/asm-generic/irqflags.h
#ifndef __ASM_GENERIC_IRQFLAGS_H
#define __ASM_GENERIC_IRQFLAGS_H

/*
 * All architectures should implement at least the first two functions,
 * usually inline assembly will be the best way.
 *
 * Every definition below is guarded by #ifndef so an architecture can
 * override any of them with a macro of the same name before this file
 * is included.
 */
#ifndef RAW_IRQ_DISABLED
#define RAW_IRQ_DISABLED 0
#define RAW_IRQ_ENABLED 1
#endif

/* read interrupt enabled status */
#ifndef __raw_local_save_flags
unsigned long __raw_local_save_flags(void);
#endif

/* set interrupt enabled status */
#ifndef raw_local_irq_restore
void raw_local_irq_restore(unsigned long flags);
#endif

/* get status and disable interrupts */
#ifndef __raw_local_irq_save
static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long flags;
	/* save the current state first, then disable */
	flags = __raw_local_save_flags();
	raw_local_irq_restore(RAW_IRQ_DISABLED);
	return flags;
}
#endif

/* test flags: nonzero if the saved flags mean "interrupts disabled" */
#ifndef raw_irqs_disabled_flags
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
	return flags == RAW_IRQ_DISABLED;
}
#endif

/* unconditionally enable interrupts */
#ifndef raw_local_irq_enable
static inline void raw_local_irq_enable(void)
{
	raw_local_irq_restore(RAW_IRQ_ENABLED);
}
#endif

/* unconditionally disable interrupts */
#ifndef raw_local_irq_disable
static inline void raw_local_irq_disable(void)
{
	raw_local_irq_restore(RAW_IRQ_DISABLED);
}
#endif

/* test hardware interrupt enable bit */
#ifndef raw_irqs_disabled
static inline int raw_irqs_disabled(void)
{
	return raw_irqs_disabled_flags(__raw_local_save_flags());
}
#endif

/*
 * Statement-style wrappers: these take the flags variable as an lvalue
 * argument, matching the interface expected by <linux/irqflags.h>.
 */
#define raw_local_save_flags(flags) \
	do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags) \
	do { (flags) = __raw_local_irq_save(); } while (0)

#endif /* __ASM_GENERIC_IRQFLAGS_H */
include/asm-generic/kmap_types.h
#ifndef _ASM_GENERIC_KMAP_TYPES_H
#define _ASM_GENERIC_KMAP_TYPES_H

/*
 * With CONFIG_DEBUG_HIGHMEM, D(n) inserts a guard enumerator before
 * each real kmap slot so that using the wrong slot type can be
 * detected.  Otherwise D(n) expands to nothing and the slots are
 * numbered densely from zero.
 */
#ifdef CONFIG_DEBUG_HIGHMEM
# define D(n) __KM_FENCE_##n ,
#else
# define D(n)
#endif

enum km_type {
D(0)	KM_BOUNCE_READ,
D(1)	KM_SKB_SUNRPC_DATA,
D(2)	KM_SKB_DATA_SOFTIRQ,
D(3)	KM_USER0,
D(4)	KM_USER1,
D(5)	KM_BIO_SRC_IRQ,
D(6)	KM_BIO_DST_IRQ,
D(7)	KM_PTE0,
D(8)	KM_PTE1,
D(9)	KM_IRQ0,
D(10)	KM_IRQ1,
D(11)	KM_SOFTIRQ0,
D(12)	KM_SOFTIRQ1,
D(13)	KM_SYNC_ICACHE,
D(14)	KM_SYNC_DCACHE,
D(15)	KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */
D(16)	KM_TYPE_NR	/* number of real slot types, always last */
};

#undef D

#endif /* _ASM_GENERIC_KMAP_TYPES_H */
include/asm-generic/linkage.h
include/asm-generic/module.h
#ifndef __ASM_GENERIC_MODULE_H
#define __ASM_GENERIC_MODULE_H

/*
 * Many architectures just need a simple module
 * loader without arch specific data.
 */
struct mod_arch_specific
{
};

/* Select the ELF structure types matching the kernel's word size. */
#ifdef CONFIG_64BIT
#define Elf_Shdr	Elf64_Shdr
#define Elf_Sym		Elf64_Sym
#define Elf_Ehdr	Elf64_Ehdr
#else
#define Elf_Shdr	Elf32_Shdr
#define Elf_Sym		Elf32_Sym
#define Elf_Ehdr	Elf32_Ehdr
#endif

#endif /* __ASM_GENERIC_MODULE_H */
include/asm-generic/mutex.h
include/asm-generic/scatterlist.h
#ifndef __ASM_GENERIC_SCATTERLIST_H
#define __ASM_GENERIC_SCATTERLIST_H

#include <linux/types.h>

/*
 * Generic scatter-gather list entry.  page_link encodes the page
 * pointer plus chain/end marker bits; offset/length describe the
 * region within that page.  dma_address/dma_length are filled in by
 * dma_map_sg().
 */
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
	unsigned long	sg_magic;	/* corruption canary */
#endif
	unsigned long	page_link;
	unsigned int	offset;
	unsigned int	length;
	dma_addr_t	dma_address;
	unsigned int	dma_length;
};

/*
 * These macros should be used after a dma_map_sg call has been done
 * to get bus addresses of each of the SG entries and their lengths.
 * You should only work with the number of sg entries pci_map_sg
 * returns, or alternatively stop on the first sg_dma_len(sg) which
 * is 0.
 */
#define sg_dma_address(sg)	((sg)->dma_address)
#ifndef sg_dma_len
/*
 * Normally, you have an iommu on 64 bit machines, but not on 32 bit
 * machines. Architectures that are different should override this.
 */
#if __BITS_PER_LONG == 64
#define sg_dma_len(sg)		((sg)->dma_length)
#else
#define sg_dma_len(sg)		((sg)->length)
#endif /* 64 bit */
#endif /* sg_dma_len */

/* No ISA DMA address limit by default; architectures may lower this. */
#ifndef ISA_DMA_THRESHOLD
#define ISA_DMA_THRESHOLD	(~0UL)
#endif

#define ARCH_HAS_SG_CHAIN

#endif /* __ASM_GENERIC_SCATTERLIST_H */
include/asm-generic/spinlock.h
#ifndef __ASM_GENERIC_SPINLOCK_H
#define __ASM_GENERIC_SPINLOCK_H
/*
 * You need to implement asm/spinlock.h for SMP support. The generic
 * version does not handle SMP.
 */
#ifdef CONFIG_SMP
#error need an architecture specific asm/spinlock.h
#endif

#endif /* __ASM_GENERIC_SPINLOCK_H */
include/asm-generic/string.h
#ifndef __ASM_GENERIC_STRING_H
#define __ASM_GENERIC_STRING_H
/*
 * The kernel provides all required functions in lib/string.c
 *
 * Architectures probably want to provide at least their own optimized
 * memcpy and memset functions though.
 *
 * (Intentionally empty: defining no __HAVE_ARCH_* macros here makes
 * <linux/string.h> fall back to the generic C implementations.)
 */

#endif /* __ASM_GENERIC_STRING_H */
include/asm-generic/syscalls.h
#ifndef __ASM_GENERIC_SYSCALLS_H
#define __ASM_GENERIC_SYSCALLS_H

#include <linux/compiler.h>
#include <linux/linkage.h>

/*
 * Calling conventions for these system calls can differ, so
 * it's possible to override them: an architecture defines the macro
 * of the same name to suppress the generic prototype and supplies
 * its own declaration instead.
 */
#ifndef sys_clone
asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
			void __user *parent_tid, void __user *child_tid,
			struct pt_regs *regs);
#endif

#ifndef sys_fork
asmlinkage long sys_fork(struct pt_regs *regs);
#endif

#ifndef sys_vfork
asmlinkage long sys_vfork(struct pt_regs *regs);
#endif

#ifndef sys_execve
asmlinkage long sys_execve(char __user *filename, char __user * __user *argv,
			char __user * __user *envp, struct pt_regs *regs);
#endif

/* sys_mmap2 takes the offset in pages, sys_mmap in bytes */
#ifndef sys_mmap2
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			unsigned long prot, unsigned long flags,
			unsigned long fd, unsigned long pgoff);
#endif

#ifndef sys_mmap
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
			unsigned long prot, unsigned long flags,
			unsigned long fd, off_t pgoff);
#endif

#ifndef sys_sigaltstack
asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
			struct pt_regs *);
#endif

#ifndef sys_rt_sigreturn
asmlinkage long sys_rt_sigreturn(struct pt_regs *regs);
#endif

#ifndef sys_rt_sigsuspend
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
#endif

#ifndef sys_rt_sigaction
asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act,
			struct sigaction __user *oact, size_t sigsetsize);
#endif

#endif /* __ASM_GENERIC_SYSCALLS_H */
include/asm-generic/system.h
/* Generic system definitions, based on MN10300 definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but it serves more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_SYSTEM_H
#define __ASM_GENERIC_SYSTEM_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/cmpxchg-local.h>

struct task_struct;

/* context switching is now performed out-of-line in switch_to.S */
extern struct task_struct *__switch_to(struct task_struct *,
		struct task_struct *);
#define switch_to(prev, next, last)					\
	do {								\
		((last) = __switch_to((prev), (next)));			\
	} while (0)

/* no stack randomization in the generic version */
#define arch_align_stack(x) (x)

#define nop() asm volatile ("nop")

#endif /* !__ASSEMBLY__ */

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This implementation only contains a compiler barrier.
 * NOTE(review): a compiler barrier alone is only sufficient on CPUs
 * that do not reorder memory accesses — an architecture with a weaker
 * memory model must override these.
 */

#define mb()	asm volatile ("": : :"memory")
#define rmb()	mb()
#define wmb()	asm volatile ("": : :"memory")

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
/* on UP, ordering against other CPUs is moot; a compiler barrier is enough */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value)  do { var = value;  mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define read_barrier_depends()		do {} while (0)
#define smp_read_barrier_depends()	do {} while (0)

/*
 * we make sure local_irq_enable() doesn't cause priority inversion
 */
#ifndef __ASSEMBLY__

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);

/*
 * Generic exchange: atomically swap *ptr with x and return the old
 * value.  Falls back to disabling local interrupts around a
 * load/store pair when the architecture does not provide __xchg_uNN
 * — NOTE(review): that fallback is only atomic on uniprocessor
 * systems (consistent with asm-generic/spinlock.h rejecting SMP).
 */
static inline
unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, flags;

	switch (size) {
	case 1:
#ifdef __xchg_u8
		return __xchg_u8(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u8 *)ptr;
		*(volatile u8 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u8 */

	case 2:
#ifdef __xchg_u16
		return __xchg_u16(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u16 *)ptr;
		*(volatile u16 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u16 */

	case 4:
#ifdef __xchg_u32
		return __xchg_u32(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u32 *)ptr;
		*(volatile u32 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u32 */

#ifdef CONFIG_64BIT
	case 8:
#ifdef __xchg_u64
		return __xchg_u64(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u64 *)ptr;
		*(volatile u64 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u64 */
#endif /* CONFIG_64BIT */

	default:
		/* unsupported size: force a link-time error */
		__xchg_called_with_bad_pointer();
		return x;
	}
}

#define xchg(ptr, x) \
	((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

/*
 * Generic compare-and-exchange on a word-sized value: store new into
 * *m only if *m equals old; always return the previous value of *m.
 * Uses the same irq-disable technique as __xchg, with the same
 * UP-only atomicity caveat.
 */
static inline unsigned long __cmpxchg(volatile unsigned long *m,
				      unsigned long old, unsigned long new)
{
	unsigned long retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);
	return retval;
}

#define cmpxchg(ptr, o, n)					\
	((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr),	\
					(unsigned long)(o),	\
					(unsigned long)(n)))

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_SYSTEM_H */
include/asm-generic/unaligned.h
#ifndef __ASM_GENERIC_UNALIGNED_H
#define __ASM_GENERIC_UNALIGNED_H

/*
 * This is the most generic implementation of unaligned accesses
 * and should work almost anywhere.
 *
 * If an architecture can handle unaligned accesses in hardware,
 * it may want to use the linux/unaligned/access_ok.h implementation
 * instead.
 */
#include <asm/byteorder.h>

/*
 * Native-endian accesses go through packed-struct loads/stores;
 * foreign-endian accesses are assembled byte by byte with shifts.
 */
#if defined(__LITTLE_ENDIAN)
# include <linux/unaligned/le_struct.h>
# include <linux/unaligned/be_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned	__get_unaligned_le
# define put_unaligned	__put_unaligned_le
#elif defined(__BIG_ENDIAN)
# include <linux/unaligned/be_struct.h>
# include <linux/unaligned/le_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned	__get_unaligned_be
# define put_unaligned	__put_unaligned_be
#else
# error need to define endianess
#endif

#endif /* __ASM_GENERIC_UNALIGNED_H */
include/asm-generic/user.h