Commit d99cf715a0751b0c819cdd8616c8870c1dd51910

Author:    Adrian Bunk
Committer: Linus Torvalds
Parent:    7ef9390541

[PATCH] xtensa: replace 'extern inline' with 'static inline'

"extern inline" doesn't make sense.

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

13 changed files with 70 additions and 70 deletions

include/asm-xtensa/atomic.h
@@ -66,7 +66,7 @@
  *
  * Atomically adds @i to @v.
  */
-extern __inline__ void atomic_add(int i, atomic_t * v)
+static inline void atomic_add(int i, atomic_t * v)
 {
 	unsigned int vval;
 
@@ -90,7 +90,7 @@
  *
  * Atomically subtracts @i from @v.
  */
-extern __inline__ void atomic_sub(int i, atomic_t *v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
 	unsigned int vval;
 
@@ -111,7 +111,7 @@
  * We use atomic_{add|sub}_return to define other functions.
  */
 
-extern __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned int vval;
 
@@ -130,7 +130,7 @@
 	return vval;
 }
 
-extern __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned int vval;
 
@@ -224,7 +224,7 @@
 #define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
 
 
-extern __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned int all_f = -1;
 	unsigned int vval;
@@ -243,7 +243,7 @@
 	);
 }
 
-extern __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned int vval;
 
include/asm-xtensa/checksum.h
@@ -47,14 +47,14 @@
  * If you use these functions directly please don't forget the
  * verify_area().
  */
-extern __inline__
+static inline
 unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
 					 int len, int sum)
 {
 	return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
 }
 
-extern __inline__
+static inline
 unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
 					   int len, int sum, int *err_ptr)
 {
include/asm-xtensa/delay.h
@@ -18,7 +18,7 @@
 
 extern unsigned long loops_per_jiffy;
 
-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
 {
 	/* 2 cycles per loop. */
 	__asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
include/asm-xtensa/io.h
@@ -41,12 +41,12 @@
  * These are trivial on the 1:1 Linux/Xtensa mapping
  */
 
-extern inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
 {
 	return PHYSADDR((unsigned long)address);
 }
 
-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
 {
 	return (void*) CACHED_ADDR(address);
 }
@@ -55,12 +55,12 @@
  * IO bus memory addresses are also 1:1 with the physical address
 */
 
-extern inline unsigned long virt_to_bus(volatile void * address)
+static inline unsigned long virt_to_bus(volatile void * address)
 {
 	return PHYSADDR((unsigned long)address);
 }
 
-extern inline void * bus_to_virt (unsigned long address)
+static inline void * bus_to_virt (unsigned long address)
 {
 	return (void *) CACHED_ADDR(address);
 }
@@ -69,17 +69,17 @@
  * Change "struct page" to physical address.
 */
 
-extern inline void *ioremap(unsigned long offset, unsigned long size)
+static inline void *ioremap(unsigned long offset, unsigned long size)
 {
 	return (void *) CACHED_ADDR_IO(offset);
 }
 
-extern inline void *ioremap_nocache(unsigned long offset, unsigned long size)
+static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
 {
 	return (void *) BYPASS_ADDR_IO(offset);
 }
 
-extern inline void iounmap(void *addr)
+static inline void iounmap(void *addr)
 {
 }
 
... ... @@ -69,17 +69,17 @@
69 69 * Change "struct page" to physical address.
70 70 */
71 71  
72   -extern inline void *ioremap(unsigned long offset, unsigned long size)
  72 +static inline void *ioremap(unsigned long offset, unsigned long size)
73 73 {
74 74 return (void *) CACHED_ADDR_IO(offset);
75 75 }
76 76  
77   -extern inline void *ioremap_nocache(unsigned long offset, unsigned long size)
  77 +static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
78 78 {
79 79 return (void *) BYPASS_ADDR_IO(offset);
80 80 }
81 81  
82   -extern inline void iounmap(void *addr)
  82 +static inline void iounmap(void *addr)
83 83 {
84 84 }
85 85  
include/asm-xtensa/mmu_context.h
@@ -199,13 +199,13 @@
 #define ASID_FIRST_VERSION \
 	((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)
 
-extern inline void set_rasid_register (unsigned long val)
+static inline void set_rasid_register (unsigned long val)
 {
 	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
 			      " isync\n" : : "a" (val));
 }
 
-extern inline unsigned long get_rasid_register (void)
+static inline unsigned long get_rasid_register (void)
 {
 	unsigned long tmp;
 	__asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
@@ -215,7 +215,7 @@
 
 #if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))
 
-extern inline void
+static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
 {
 	extern void flush_tlb_all(void);
@@ -234,7 +234,7 @@
 /* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL ==1 are
    really the best, but if you insist... */
 
-extern inline int validate_asid (unsigned long asid)
+static inline int validate_asid (unsigned long asid)
 {
 	switch (asid) {
 	case XCHAL_MMU_ASID_INVALID:
@@ -247,7 +247,7 @@
 	return 1; /* valid */
 }
 
-extern inline void
+static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
 {
 	extern void flush_tlb_all(void);
@@ -274,14 +274,14 @@
  * instance.
 */
 
-extern inline int
+static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	mm->context = NO_CONTEXT;
 	return 0;
 }
 
-extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
 	unsigned long asid = asid_cache;
@@ -301,7 +301,7 @@
  * Destroy context related info for an mm_struct that is about
  * to be put to rest.
 */
-extern inline void destroy_context(struct mm_struct *mm)
+static inline void destroy_context(struct mm_struct *mm)
 {
 	/* Nothing to do. */
 }
@@ -310,7 +310,7 @@
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
-extern inline void
+static inline void
 activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
 	/* Unconditionally get a new ASID. */
include/asm-xtensa/page.h
@@ -55,7 +55,7 @@
  * Pure 2^n version of get_order
  */
 
-extern __inline__ int get_order(unsigned long size)
+static inline int get_order(unsigned long size)
 {
 	int order;
 #ifndef XCHAL_HAVE_NSU
include/asm-xtensa/pci.h
@@ -22,12 +22,12 @@
 
 extern struct pci_controller* pcibios_alloc_controller(void);
 
-extern inline void pcibios_set_master(struct pci_dev *dev)
+static inline void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
 }
 
-extern inline void pcibios_penalize_isa_irq(int irq)
+static inline void pcibios_penalize_isa_irq(int irq)
 {
 	/* We don't do dynamic PCI IRQ allocation */
 }
include/asm-xtensa/pgtable.h
@@ -260,7 +260,7 @@
 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
 
-extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 }
@@ -278,14 +278,14 @@
 #endif
 }
 
-extern inline void
+static inline void
 set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
 {
 	update_pte(ptep, pteval);
 }
 
 
-extern inline void
+static inline void
 set_pmd(pmd_t *pmdp, pmd_t pmdval)
 {
 	*pmdp = pmdval;
include/asm-xtensa/semaphore.h
@@ -47,7 +47,7 @@
 #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
 #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
 
-extern inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init (struct semaphore *sem, int val)
 {
 	/*
 	 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
@@ -79,7 +79,7 @@
 
 extern spinlock_t semaphore_wake_lock;
 
-extern __inline__ void down(struct semaphore * sem)
+static inline void down(struct semaphore * sem)
 {
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
@@ -89,7 +89,7 @@
 	__down(sem);
 }
 
-extern __inline__ int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore * sem)
 {
 	int ret = 0;
 #if WAITQUEUE_DEBUG
@@ -101,7 +101,7 @@
 	return ret;
 }
 
-extern __inline__ int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore * sem)
 {
 	int ret = 0;
 #if WAITQUEUE_DEBUG
@@ -117,7 +117,7 @@
  * Note! This is subtle. We jump to wake people up only if
  * the semaphore was negative (== somebody was waiting on it).
  */
-extern __inline__ void up(struct semaphore * sem)
+static inline void up(struct semaphore * sem)
 {
 #if WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
include/asm-xtensa/string.h
@@ -16,7 +16,7 @@
 #define _XTENSA_STRING_H
 
 #define __HAVE_ARCH_STRCPY
-extern __inline__ char *strcpy(char *__dest, const char *__src)
+static inline char *strcpy(char *__dest, const char *__src)
 {
 	register char *__xdest = __dest;
 	unsigned long __dummy;
@@ -35,7 +35,7 @@
 }
 
 #define __HAVE_ARCH_STRNCPY
-extern __inline__ char *strncpy(char *__dest, const char *__src, size_t __n)
+static inline char *strncpy(char *__dest, const char *__src, size_t __n)
 {
 	register char *__xdest = __dest;
 	unsigned long __dummy;
@@ -60,7 +60,7 @@
 }
 
 #define __HAVE_ARCH_STRCMP
-extern __inline__ int strcmp(const char *__cs, const char *__ct)
+static inline int strcmp(const char *__cs, const char *__ct)
 {
 	register int __res;
 	unsigned long __dummy;
@@ -82,7 +82,7 @@
 }
 
 #define __HAVE_ARCH_STRNCMP
-extern __inline__ int strncmp(const char *__cs, const char *__ct, size_t __n)
+static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
 {
 	register int __res;
 	unsigned long __dummy;
include/asm-xtensa/system.h
@@ -56,7 +56,7 @@
 
 #define clear_cpenable() __clear_cpenable()
 
-extern __inline__ void __clear_cpenable(void)
+static inline void __clear_cpenable(void)
 {
 #if XCHAL_HAVE_CP
 	unsigned long i = 0;
@@ -64,7 +64,7 @@
 #endif
 }
 
-extern __inline__ void enable_coprocessor(int i)
+static inline void enable_coprocessor(int i)
 {
 #if XCHAL_HAVE_CP
 	int cp;
@@ -74,7 +74,7 @@
 #endif
 }
 
-extern __inline__ void disable_coprocessor(int i)
+static inline void disable_coprocessor(int i)
 {
 #if XCHAL_HAVE_CP
 	int cp;
@@ -123,7 +123,7 @@
  * cmpxchg
  */
 
-extern __inline__ unsigned long
+static inline unsigned long
 __cmpxchg_u32(volatile int *p, int old, int new)
 {
 	__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
@@ -173,7 +173,7 @@
  * where no register reference will cause an overflow.
 */
 
-extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
include/asm-xtensa/tlbflush.h
@@ -39,7 +39,7 @@
  * page-table pages.
 */
 
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
 }
@@ -51,26 +51,26 @@
 #define ITLB_PROBE_SUCCESS (1 << ITLB_WAYS_LOG2)
 #define DTLB_PROBE_SUCCESS (1 << DTLB_WAYS_LOG2)
 
-extern inline unsigned long itlb_probe(unsigned long addr)
+static inline unsigned long itlb_probe(unsigned long addr)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
 	return tmp;
 }
 
-extern inline unsigned long dtlb_probe(unsigned long addr)
+static inline unsigned long dtlb_probe(unsigned long addr)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
 	return tmp;
 }
 
-extern inline void invalidate_itlb_entry (unsigned long probe)
+static inline void invalidate_itlb_entry (unsigned long probe)
 {
 	__asm__ __volatile__("iitlb %0; isync\n\t" : : "a" (probe));
 }
 
-extern inline void invalidate_dtlb_entry (unsigned long probe)
+static inline void invalidate_dtlb_entry (unsigned long probe)
 {
 	__asm__ __volatile__("idtlb %0; dsync\n\t" : : "a" (probe));
 }
@@ -80,68 +80,68 @@
 * caller must follow up with an 'isync', which can be relatively
 * expensive on some Xtensa implementations.
 */
-extern inline void invalidate_itlb_entry_no_isync (unsigned entry)
+static inline void invalidate_itlb_entry_no_isync (unsigned entry)
 {
 	/* Caller must follow up with 'isync'. */
 	__asm__ __volatile__ ("iitlb %0\n" : : "a" (entry) );
 }
 
-extern inline void invalidate_dtlb_entry_no_isync (unsigned entry)
+static inline void invalidate_dtlb_entry_no_isync (unsigned entry)
 {
 	/* Caller must follow up with 'isync'. */
 	__asm__ __volatile__ ("idtlb %0\n" : : "a" (entry) );
 }
 
-extern inline void set_itlbcfg_register (unsigned long val)
+static inline void set_itlbcfg_register (unsigned long val)
 {
 	__asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t"
 			     : : "a" (val));
 }
 
-extern inline void set_dtlbcfg_register (unsigned long val)
+static inline void set_dtlbcfg_register (unsigned long val)
 {
 	__asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t"
 			     : : "a" (val));
 }
 
-extern inline void set_ptevaddr_register (unsigned long val)
+static inline void set_ptevaddr_register (unsigned long val)
 {
 	__asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n"
 			     : : "a" (val));
 }
 
-extern inline unsigned long read_ptevaddr_register (void)
+static inline unsigned long read_ptevaddr_register (void)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp));
 	return tmp;
 }
 
-extern inline void write_dtlb_entry (pte_t entry, int way)
+static inline void write_dtlb_entry (pte_t entry, int way)
 {
 	__asm__ __volatile__("wdtlb %1, %0; dsync\n\t"
 			     : : "r" (way), "r" (entry) );
 }
 
-extern inline void write_itlb_entry (pte_t entry, int way)
+static inline void write_itlb_entry (pte_t entry, int way)
 {
 	__asm__ __volatile__("witlb %1, %0; isync\n\t"
 			     : : "r" (way), "r" (entry) );
 }
 
-extern inline void invalidate_page_directory (void)
+static inline void invalidate_page_directory (void)
 {
 	invalidate_dtlb_entry (DTLB_WAY_PGTABLE);
 }
 
-extern inline void invalidate_itlb_mapping (unsigned address)
+static inline void invalidate_itlb_mapping (unsigned address)
 {
 	unsigned long tlb_entry;
 	while ((tlb_entry = itlb_probe (address)) & ITLB_PROBE_SUCCESS)
 		invalidate_itlb_entry (tlb_entry);
 }
 
-extern inline void invalidate_dtlb_mapping (unsigned address)
+static inline void invalidate_dtlb_mapping (unsigned address)
 {
 	unsigned long tlb_entry;
 	while ((tlb_entry = dtlb_probe (address)) & DTLB_PROBE_SUCCESS)
@@ -165,28 +165,28 @@
  * as[07..00] contain the asid
 */
 
-extern inline unsigned long read_dtlb_virtual (int way)
+static inline unsigned long read_dtlb_virtual (int way)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
 	return tmp;
 }
 
-extern inline unsigned long read_dtlb_translation (int way)
+static inline unsigned long read_dtlb_translation (int way)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
 	return tmp;
 }
 
-extern inline unsigned long read_itlb_virtual (int way)
+static inline unsigned long read_itlb_virtual (int way)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
 	return tmp;
 }
 
-extern inline unsigned long read_itlb_translation (int way)
+static inline unsigned long read_itlb_translation (int way)
 {
 	unsigned long tmp;
 	__asm__ __volatile__("ritlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
include/asm-xtensa/uaccess.h
@@ -211,7 +211,7 @@
 #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
 #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
 
-extern inline int verify_area(int type, const void * addr, unsigned long size)
+static inline int verify_area(int type, const void * addr, unsigned long size)
 {
 	return access_ok(type,addr,size) ? 0 : -EFAULT;
 }
@@ -464,7 +464,7 @@
  * success.
 */
 
-extern inline unsigned long
+static inline unsigned long
 __xtensa_clear_user(void *addr, unsigned long size)
 {
 	if ( ! memset(addr, 0, size) )
@@ -472,7 +472,7 @@
 	return 0;
 }
 
-extern inline unsigned long
+static inline unsigned long
 clear_user(void *addr, unsigned long size)
 {
 	if (access_ok(VERIFY_WRITE, addr, size))
@@ -486,7 +486,7 @@
 extern long __strncpy_user(char *, const char *, long);
 #define __strncpy_from_user __strncpy_user
 
-extern inline long
+static inline long
 strncpy_from_user(char *dst, const char *src, long count)
 {
 	if (access_ok(VERIFY_READ, src, 1))
@@ -502,7 +502,7 @@
 */
 extern long __strnlen_user(const char *, long);
 
-extern inline long strnlen_user(const char *str, long len)
+static inline long strnlen_user(const char *str, long len)
 {
 	unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;
 
508 508