Commit 268364a0f48aee2f851f9d1ef8a6cda0f3039ef1
Committed by
Ingo Molnar
1 parent
d210baf53b
Exists in
master
and in
7 other branches
IO resources: add reserve_region_with_split()
add reserve_region_with_split() so that e820 reserved entries are not lost if they overlap with existing IO regions. With a test case (extending 0xe0000000 - 0xefffffff to 0xdd800000) we get: e0000000-efffffff : PCI MMCONFIG 0 e0000000-efffffff : reserved in /proc/iomem, and in dmesg: found conflict for reserved [dd800000, efffffff], try to reserve with split __reserve_region_with_split: (PCI Bus #80) [dd000000, ddffffff], res: (reserved) [dd800000, efffffff] __reserve_region_with_split: (PCI Bus #00) [de000000, dfffffff], res: (reserved) [de000000, efffffff] initcall pci_subsys_init+0x0/0x121 returned 0 after 381 msecs. Various fixes and improvements suggested by Linus. Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 2 changed files with 71 additions and 0 deletions Inline Diff
include/linux/ioport.h
1 | /* | 1 | /* |
2 | * ioport.h Definitions of routines for detecting, reserving and | 2 | * ioport.h Definitions of routines for detecting, reserving and |
3 | * allocating system resources. | 3 | * allocating system resources. |
4 | * | 4 | * |
5 | * Authors: Linus Torvalds | 5 | * Authors: Linus Torvalds |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifndef _LINUX_IOPORT_H | 8 | #ifndef _LINUX_IOPORT_H |
9 | #define _LINUX_IOPORT_H | 9 | #define _LINUX_IOPORT_H |
10 | 10 | ||
11 | #ifndef __ASSEMBLY__ | 11 | #ifndef __ASSEMBLY__ |
12 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | /* | 14 | /* |
15 | * Resources are tree-like, allowing | 15 | * Resources are tree-like, allowing |
16 | * nesting etc.. | 16 | * nesting etc.. |
17 | */ | 17 | */ |
18 | struct resource { | 18 | struct resource { |
19 | resource_size_t start; | 19 | resource_size_t start; |
20 | resource_size_t end; | 20 | resource_size_t end; |
21 | const char *name; | 21 | const char *name; |
22 | unsigned long flags; | 22 | unsigned long flags; |
23 | struct resource *parent, *sibling, *child; | 23 | struct resource *parent, *sibling, *child; |
24 | }; | 24 | }; |
25 | 25 | ||
26 | struct resource_list { | 26 | struct resource_list { |
27 | struct resource_list *next; | 27 | struct resource_list *next; |
28 | struct resource *res; | 28 | struct resource *res; |
29 | struct pci_dev *dev; | 29 | struct pci_dev *dev; |
30 | }; | 30 | }; |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * IO resources have these defined flags. | 33 | * IO resources have these defined flags. |
34 | */ | 34 | */ |
35 | #define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ | 35 | #define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ |
36 | 36 | ||
37 | #define IORESOURCE_IO 0x00000100 /* Resource type */ | 37 | #define IORESOURCE_IO 0x00000100 /* Resource type */ |
38 | #define IORESOURCE_MEM 0x00000200 | 38 | #define IORESOURCE_MEM 0x00000200 |
39 | #define IORESOURCE_IRQ 0x00000400 | 39 | #define IORESOURCE_IRQ 0x00000400 |
40 | #define IORESOURCE_DMA 0x00000800 | 40 | #define IORESOURCE_DMA 0x00000800 |
41 | 41 | ||
42 | #define IORESOURCE_PREFETCH 0x00001000 /* No side effects */ | 42 | #define IORESOURCE_PREFETCH 0x00001000 /* No side effects */ |
43 | #define IORESOURCE_READONLY 0x00002000 | 43 | #define IORESOURCE_READONLY 0x00002000 |
44 | #define IORESOURCE_CACHEABLE 0x00004000 | 44 | #define IORESOURCE_CACHEABLE 0x00004000 |
45 | #define IORESOURCE_RANGELENGTH 0x00008000 | 45 | #define IORESOURCE_RANGELENGTH 0x00008000 |
46 | #define IORESOURCE_SHADOWABLE 0x00010000 | 46 | #define IORESOURCE_SHADOWABLE 0x00010000 |
47 | 47 | ||
48 | #define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */ | 48 | #define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */ |
49 | #define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */ | 49 | #define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */ |
50 | 50 | ||
51 | #define IORESOURCE_DISABLED 0x10000000 | 51 | #define IORESOURCE_DISABLED 0x10000000 |
52 | #define IORESOURCE_UNSET 0x20000000 | 52 | #define IORESOURCE_UNSET 0x20000000 |
53 | #define IORESOURCE_AUTO 0x40000000 | 53 | #define IORESOURCE_AUTO 0x40000000 |
54 | #define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ | 54 | #define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ |
55 | 55 | ||
56 | /* PnP IRQ specific bits (IORESOURCE_BITS) */ | 56 | /* PnP IRQ specific bits (IORESOURCE_BITS) */ |
57 | #define IORESOURCE_IRQ_HIGHEDGE (1<<0) | 57 | #define IORESOURCE_IRQ_HIGHEDGE (1<<0) |
58 | #define IORESOURCE_IRQ_LOWEDGE (1<<1) | 58 | #define IORESOURCE_IRQ_LOWEDGE (1<<1) |
59 | #define IORESOURCE_IRQ_HIGHLEVEL (1<<2) | 59 | #define IORESOURCE_IRQ_HIGHLEVEL (1<<2) |
60 | #define IORESOURCE_IRQ_LOWLEVEL (1<<3) | 60 | #define IORESOURCE_IRQ_LOWLEVEL (1<<3) |
61 | #define IORESOURCE_IRQ_SHAREABLE (1<<4) | 61 | #define IORESOURCE_IRQ_SHAREABLE (1<<4) |
62 | #define IORESOURCE_IRQ_OPTIONAL (1<<5) | 62 | #define IORESOURCE_IRQ_OPTIONAL (1<<5) |
63 | 63 | ||
64 | /* PnP DMA specific bits (IORESOURCE_BITS) */ | 64 | /* PnP DMA specific bits (IORESOURCE_BITS) */ |
65 | #define IORESOURCE_DMA_TYPE_MASK (3<<0) | 65 | #define IORESOURCE_DMA_TYPE_MASK (3<<0) |
66 | #define IORESOURCE_DMA_8BIT (0<<0) | 66 | #define IORESOURCE_DMA_8BIT (0<<0) |
67 | #define IORESOURCE_DMA_8AND16BIT (1<<0) | 67 | #define IORESOURCE_DMA_8AND16BIT (1<<0) |
68 | #define IORESOURCE_DMA_16BIT (2<<0) | 68 | #define IORESOURCE_DMA_16BIT (2<<0) |
69 | 69 | ||
70 | #define IORESOURCE_DMA_MASTER (1<<2) | 70 | #define IORESOURCE_DMA_MASTER (1<<2) |
71 | #define IORESOURCE_DMA_BYTE (1<<3) | 71 | #define IORESOURCE_DMA_BYTE (1<<3) |
72 | #define IORESOURCE_DMA_WORD (1<<4) | 72 | #define IORESOURCE_DMA_WORD (1<<4) |
73 | 73 | ||
74 | #define IORESOURCE_DMA_SPEED_MASK (3<<6) | 74 | #define IORESOURCE_DMA_SPEED_MASK (3<<6) |
75 | #define IORESOURCE_DMA_COMPATIBLE (0<<6) | 75 | #define IORESOURCE_DMA_COMPATIBLE (0<<6) |
76 | #define IORESOURCE_DMA_TYPEA (1<<6) | 76 | #define IORESOURCE_DMA_TYPEA (1<<6) |
77 | #define IORESOURCE_DMA_TYPEB (2<<6) | 77 | #define IORESOURCE_DMA_TYPEB (2<<6) |
78 | #define IORESOURCE_DMA_TYPEF (3<<6) | 78 | #define IORESOURCE_DMA_TYPEF (3<<6) |
79 | 79 | ||
80 | /* PnP memory I/O specific bits (IORESOURCE_BITS) */ | 80 | /* PnP memory I/O specific bits (IORESOURCE_BITS) */ |
81 | #define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */ | 81 | #define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */ |
82 | #define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */ | 82 | #define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */ |
83 | #define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */ | 83 | #define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */ |
84 | #define IORESOURCE_MEM_TYPE_MASK (3<<3) | 84 | #define IORESOURCE_MEM_TYPE_MASK (3<<3) |
85 | #define IORESOURCE_MEM_8BIT (0<<3) | 85 | #define IORESOURCE_MEM_8BIT (0<<3) |
86 | #define IORESOURCE_MEM_16BIT (1<<3) | 86 | #define IORESOURCE_MEM_16BIT (1<<3) |
87 | #define IORESOURCE_MEM_8AND16BIT (2<<3) | 87 | #define IORESOURCE_MEM_8AND16BIT (2<<3) |
88 | #define IORESOURCE_MEM_32BIT (3<<3) | 88 | #define IORESOURCE_MEM_32BIT (3<<3) |
89 | #define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ | 89 | #define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ |
90 | #define IORESOURCE_MEM_EXPANSIONROM (1<<6) | 90 | #define IORESOURCE_MEM_EXPANSIONROM (1<<6) |
91 | 91 | ||
92 | /* PnP I/O specific bits (IORESOURCE_BITS) */ | 92 | /* PnP I/O specific bits (IORESOURCE_BITS) */ |
93 | #define IORESOURCE_IO_16BIT_ADDR (1<<0) | 93 | #define IORESOURCE_IO_16BIT_ADDR (1<<0) |
94 | #define IORESOURCE_IO_FIXED (1<<1) | 94 | #define IORESOURCE_IO_FIXED (1<<1) |
95 | 95 | ||
96 | /* PCI ROM control bits (IORESOURCE_BITS) */ | 96 | /* PCI ROM control bits (IORESOURCE_BITS) */ |
97 | #define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ | 97 | #define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ |
98 | #define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */ | 98 | #define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */ |
99 | #define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */ | 99 | #define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */ |
100 | #define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */ | 100 | #define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */ |
101 | 101 | ||
102 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ | 102 | /* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
103 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ | 103 | #define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ |
104 | 104 | ||
105 | /* PC/ISA/whatever - the normal PC address spaces: IO and memory */ | 105 | /* PC/ISA/whatever - the normal PC address spaces: IO and memory */ |
106 | extern struct resource ioport_resource; | 106 | extern struct resource ioport_resource; |
107 | extern struct resource iomem_resource; | 107 | extern struct resource iomem_resource; |
108 | 108 | ||
109 | extern int request_resource(struct resource *root, struct resource *new); | 109 | extern int request_resource(struct resource *root, struct resource *new); |
110 | extern int release_resource(struct resource *new); | 110 | extern int release_resource(struct resource *new); |
111 | extern void reserve_region_with_split(struct resource *root, | ||
112 | resource_size_t start, resource_size_t end, | ||
113 | const char *name); | ||
111 | extern int insert_resource(struct resource *parent, struct resource *new); | 114 | extern int insert_resource(struct resource *parent, struct resource *new); |
112 | extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); | 115 | extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); |
113 | extern int allocate_resource(struct resource *root, struct resource *new, | 116 | extern int allocate_resource(struct resource *root, struct resource *new, |
114 | resource_size_t size, resource_size_t min, | 117 | resource_size_t size, resource_size_t min, |
115 | resource_size_t max, resource_size_t align, | 118 | resource_size_t max, resource_size_t align, |
116 | void (*alignf)(void *, struct resource *, | 119 | void (*alignf)(void *, struct resource *, |
117 | resource_size_t, resource_size_t), | 120 | resource_size_t, resource_size_t), |
118 | void *alignf_data); | 121 | void *alignf_data); |
119 | int adjust_resource(struct resource *res, resource_size_t start, | 122 | int adjust_resource(struct resource *res, resource_size_t start, |
120 | resource_size_t size); | 123 | resource_size_t size); |
121 | resource_size_t resource_alignment(struct resource *res); | 124 | resource_size_t resource_alignment(struct resource *res); |
122 | static inline resource_size_t resource_size(struct resource *res) | 125 | static inline resource_size_t resource_size(struct resource *res) |
123 | { | 126 | { |
124 | return res->end - res->start + 1; | 127 | return res->end - res->start + 1; |
125 | } | 128 | } |
126 | 129 | ||
127 | /* Convenience shorthand with allocation */ | 130 | /* Convenience shorthand with allocation */ |
128 | #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) | 131 | #define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) |
129 | #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name)) | 132 | #define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name)) |
130 | #define rename_region(region, newname) do { (region)->name = (newname); } while (0) | 133 | #define rename_region(region, newname) do { (region)->name = (newname); } while (0) |
131 | 134 | ||
132 | extern struct resource * __request_region(struct resource *, | 135 | extern struct resource * __request_region(struct resource *, |
133 | resource_size_t start, | 136 | resource_size_t start, |
134 | resource_size_t n, const char *name); | 137 | resource_size_t n, const char *name); |
135 | 138 | ||
136 | /* Compatibility cruft */ | 139 | /* Compatibility cruft */ |
137 | #define release_region(start,n) __release_region(&ioport_resource, (start), (n)) | 140 | #define release_region(start,n) __release_region(&ioport_resource, (start), (n)) |
138 | #define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n)) | 141 | #define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n)) |
139 | #define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n)) | 142 | #define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n)) |
140 | 143 | ||
141 | extern int __check_region(struct resource *, resource_size_t, resource_size_t); | 144 | extern int __check_region(struct resource *, resource_size_t, resource_size_t); |
142 | extern void __release_region(struct resource *, resource_size_t, | 145 | extern void __release_region(struct resource *, resource_size_t, |
143 | resource_size_t); | 146 | resource_size_t); |
144 | 147 | ||
145 | static inline int __deprecated check_region(resource_size_t s, | 148 | static inline int __deprecated check_region(resource_size_t s, |
146 | resource_size_t n) | 149 | resource_size_t n) |
147 | { | 150 | { |
148 | return __check_region(&ioport_resource, s, n); | 151 | return __check_region(&ioport_resource, s, n); |
149 | } | 152 | } |
150 | 153 | ||
151 | /* Wrappers for managed devices */ | 154 | /* Wrappers for managed devices */ |
152 | struct device; | 155 | struct device; |
153 | #define devm_request_region(dev,start,n,name) \ | 156 | #define devm_request_region(dev,start,n,name) \ |
154 | __devm_request_region(dev, &ioport_resource, (start), (n), (name)) | 157 | __devm_request_region(dev, &ioport_resource, (start), (n), (name)) |
155 | #define devm_request_mem_region(dev,start,n,name) \ | 158 | #define devm_request_mem_region(dev,start,n,name) \ |
156 | __devm_request_region(dev, &iomem_resource, (start), (n), (name)) | 159 | __devm_request_region(dev, &iomem_resource, (start), (n), (name)) |
157 | 160 | ||
158 | extern struct resource * __devm_request_region(struct device *dev, | 161 | extern struct resource * __devm_request_region(struct device *dev, |
159 | struct resource *parent, resource_size_t start, | 162 | struct resource *parent, resource_size_t start, |
160 | resource_size_t n, const char *name); | 163 | resource_size_t n, const char *name); |
161 | 164 | ||
162 | #define devm_release_region(start,n) \ | 165 | #define devm_release_region(start,n) \ |
163 | __devm_release_region(dev, &ioport_resource, (start), (n)) | 166 | __devm_release_region(dev, &ioport_resource, (start), (n)) |
164 | #define devm_release_mem_region(start,n) \ | 167 | #define devm_release_mem_region(start,n) \ |
165 | __devm_release_region(dev, &iomem_resource, (start), (n)) | 168 | __devm_release_region(dev, &iomem_resource, (start), (n)) |
166 | 169 | ||
167 | extern void __devm_release_region(struct device *dev, struct resource *parent, | 170 | extern void __devm_release_region(struct device *dev, struct resource *parent, |
168 | resource_size_t start, resource_size_t n); | 171 | resource_size_t start, resource_size_t n); |
169 | 172 | ||
170 | #endif /* __ASSEMBLY__ */ | 173 | #endif /* __ASSEMBLY__ */ |
171 | #endif /* _LINUX_IOPORT_H */ | 174 | #endif /* _LINUX_IOPORT_H */ |
172 | 175 |
kernel/resource.c
1 | /* | 1 | /* |
2 | * linux/kernel/resource.c | 2 | * linux/kernel/resource.c |
3 | * | 3 | * |
4 | * Copyright (C) 1999 Linus Torvalds | 4 | * Copyright (C) 1999 Linus Torvalds |
5 | * Copyright (C) 1999 Martin Mares <mj@ucw.cz> | 5 | * Copyright (C) 1999 Martin Mares <mj@ucw.cz> |
6 | * | 6 | * |
7 | * Arbitrary resource management. | 7 | * Arbitrary resource management. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/errno.h> | 11 | #include <linux/errno.h> |
12 | #include <linux/ioport.h> | 12 | #include <linux/ioport.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
17 | #include <linux/proc_fs.h> | 17 | #include <linux/proc_fs.h> |
18 | #include <linux/seq_file.h> | 18 | #include <linux/seq_file.h> |
19 | #include <linux/device.h> | 19 | #include <linux/device.h> |
20 | #include <asm/io.h> | 20 | #include <asm/io.h> |
21 | 21 | ||
22 | 22 | ||
23 | struct resource ioport_resource = { | 23 | struct resource ioport_resource = { |
24 | .name = "PCI IO", | 24 | .name = "PCI IO", |
25 | .start = 0, | 25 | .start = 0, |
26 | .end = IO_SPACE_LIMIT, | 26 | .end = IO_SPACE_LIMIT, |
27 | .flags = IORESOURCE_IO, | 27 | .flags = IORESOURCE_IO, |
28 | }; | 28 | }; |
29 | EXPORT_SYMBOL(ioport_resource); | 29 | EXPORT_SYMBOL(ioport_resource); |
30 | 30 | ||
31 | struct resource iomem_resource = { | 31 | struct resource iomem_resource = { |
32 | .name = "PCI mem", | 32 | .name = "PCI mem", |
33 | .start = 0, | 33 | .start = 0, |
34 | .end = -1, | 34 | .end = -1, |
35 | .flags = IORESOURCE_MEM, | 35 | .flags = IORESOURCE_MEM, |
36 | }; | 36 | }; |
37 | EXPORT_SYMBOL(iomem_resource); | 37 | EXPORT_SYMBOL(iomem_resource); |
38 | 38 | ||
39 | static DEFINE_RWLOCK(resource_lock); | 39 | static DEFINE_RWLOCK(resource_lock); |
40 | 40 | ||
41 | #ifdef CONFIG_PROC_FS | 41 | #ifdef CONFIG_PROC_FS |
42 | 42 | ||
43 | enum { MAX_IORES_LEVEL = 5 }; | 43 | enum { MAX_IORES_LEVEL = 5 }; |
44 | 44 | ||
45 | static void *r_next(struct seq_file *m, void *v, loff_t *pos) | 45 | static void *r_next(struct seq_file *m, void *v, loff_t *pos) |
46 | { | 46 | { |
47 | struct resource *p = v; | 47 | struct resource *p = v; |
48 | (*pos)++; | 48 | (*pos)++; |
49 | if (p->child) | 49 | if (p->child) |
50 | return p->child; | 50 | return p->child; |
51 | while (!p->sibling && p->parent) | 51 | while (!p->sibling && p->parent) |
52 | p = p->parent; | 52 | p = p->parent; |
53 | return p->sibling; | 53 | return p->sibling; |
54 | } | 54 | } |
55 | 55 | ||
56 | static void *r_start(struct seq_file *m, loff_t *pos) | 56 | static void *r_start(struct seq_file *m, loff_t *pos) |
57 | __acquires(resource_lock) | 57 | __acquires(resource_lock) |
58 | { | 58 | { |
59 | struct resource *p = m->private; | 59 | struct resource *p = m->private; |
60 | loff_t l = 0; | 60 | loff_t l = 0; |
61 | read_lock(&resource_lock); | 61 | read_lock(&resource_lock); |
62 | for (p = p->child; p && l < *pos; p = r_next(m, p, &l)) | 62 | for (p = p->child; p && l < *pos; p = r_next(m, p, &l)) |
63 | ; | 63 | ; |
64 | return p; | 64 | return p; |
65 | } | 65 | } |
66 | 66 | ||
67 | static void r_stop(struct seq_file *m, void *v) | 67 | static void r_stop(struct seq_file *m, void *v) |
68 | __releases(resource_lock) | 68 | __releases(resource_lock) |
69 | { | 69 | { |
70 | read_unlock(&resource_lock); | 70 | read_unlock(&resource_lock); |
71 | } | 71 | } |
72 | 72 | ||
73 | static int r_show(struct seq_file *m, void *v) | 73 | static int r_show(struct seq_file *m, void *v) |
74 | { | 74 | { |
75 | struct resource *root = m->private; | 75 | struct resource *root = m->private; |
76 | struct resource *r = v, *p; | 76 | struct resource *r = v, *p; |
77 | int width = root->end < 0x10000 ? 4 : 8; | 77 | int width = root->end < 0x10000 ? 4 : 8; |
78 | int depth; | 78 | int depth; |
79 | 79 | ||
80 | for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent) | 80 | for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent) |
81 | if (p->parent == root) | 81 | if (p->parent == root) |
82 | break; | 82 | break; |
83 | seq_printf(m, "%*s%0*llx-%0*llx : %s\n", | 83 | seq_printf(m, "%*s%0*llx-%0*llx : %s\n", |
84 | depth * 2, "", | 84 | depth * 2, "", |
85 | width, (unsigned long long) r->start, | 85 | width, (unsigned long long) r->start, |
86 | width, (unsigned long long) r->end, | 86 | width, (unsigned long long) r->end, |
87 | r->name ? r->name : "<BAD>"); | 87 | r->name ? r->name : "<BAD>"); |
88 | return 0; | 88 | return 0; |
89 | } | 89 | } |
90 | 90 | ||
91 | static const struct seq_operations resource_op = { | 91 | static const struct seq_operations resource_op = { |
92 | .start = r_start, | 92 | .start = r_start, |
93 | .next = r_next, | 93 | .next = r_next, |
94 | .stop = r_stop, | 94 | .stop = r_stop, |
95 | .show = r_show, | 95 | .show = r_show, |
96 | }; | 96 | }; |
97 | 97 | ||
98 | static int ioports_open(struct inode *inode, struct file *file) | 98 | static int ioports_open(struct inode *inode, struct file *file) |
99 | { | 99 | { |
100 | int res = seq_open(file, &resource_op); | 100 | int res = seq_open(file, &resource_op); |
101 | if (!res) { | 101 | if (!res) { |
102 | struct seq_file *m = file->private_data; | 102 | struct seq_file *m = file->private_data; |
103 | m->private = &ioport_resource; | 103 | m->private = &ioport_resource; |
104 | } | 104 | } |
105 | return res; | 105 | return res; |
106 | } | 106 | } |
107 | 107 | ||
108 | static int iomem_open(struct inode *inode, struct file *file) | 108 | static int iomem_open(struct inode *inode, struct file *file) |
109 | { | 109 | { |
110 | int res = seq_open(file, &resource_op); | 110 | int res = seq_open(file, &resource_op); |
111 | if (!res) { | 111 | if (!res) { |
112 | struct seq_file *m = file->private_data; | 112 | struct seq_file *m = file->private_data; |
113 | m->private = &iomem_resource; | 113 | m->private = &iomem_resource; |
114 | } | 114 | } |
115 | return res; | 115 | return res; |
116 | } | 116 | } |
117 | 117 | ||
118 | static const struct file_operations proc_ioports_operations = { | 118 | static const struct file_operations proc_ioports_operations = { |
119 | .open = ioports_open, | 119 | .open = ioports_open, |
120 | .read = seq_read, | 120 | .read = seq_read, |
121 | .llseek = seq_lseek, | 121 | .llseek = seq_lseek, |
122 | .release = seq_release, | 122 | .release = seq_release, |
123 | }; | 123 | }; |
124 | 124 | ||
125 | static const struct file_operations proc_iomem_operations = { | 125 | static const struct file_operations proc_iomem_operations = { |
126 | .open = iomem_open, | 126 | .open = iomem_open, |
127 | .read = seq_read, | 127 | .read = seq_read, |
128 | .llseek = seq_lseek, | 128 | .llseek = seq_lseek, |
129 | .release = seq_release, | 129 | .release = seq_release, |
130 | }; | 130 | }; |
131 | 131 | ||
132 | static int __init ioresources_init(void) | 132 | static int __init ioresources_init(void) |
133 | { | 133 | { |
134 | proc_create("ioports", 0, NULL, &proc_ioports_operations); | 134 | proc_create("ioports", 0, NULL, &proc_ioports_operations); |
135 | proc_create("iomem", 0, NULL, &proc_iomem_operations); | 135 | proc_create("iomem", 0, NULL, &proc_iomem_operations); |
136 | return 0; | 136 | return 0; |
137 | } | 137 | } |
138 | __initcall(ioresources_init); | 138 | __initcall(ioresources_init); |
139 | 139 | ||
140 | #endif /* CONFIG_PROC_FS */ | 140 | #endif /* CONFIG_PROC_FS */ |
141 | 141 | ||
142 | /* Return the conflict entry if you can't request it */ | 142 | /* Return the conflict entry if you can't request it */ |
143 | static struct resource * __request_resource(struct resource *root, struct resource *new) | 143 | static struct resource * __request_resource(struct resource *root, struct resource *new) |
144 | { | 144 | { |
145 | resource_size_t start = new->start; | 145 | resource_size_t start = new->start; |
146 | resource_size_t end = new->end; | 146 | resource_size_t end = new->end; |
147 | struct resource *tmp, **p; | 147 | struct resource *tmp, **p; |
148 | 148 | ||
149 | if (end < start) | 149 | if (end < start) |
150 | return root; | 150 | return root; |
151 | if (start < root->start) | 151 | if (start < root->start) |
152 | return root; | 152 | return root; |
153 | if (end > root->end) | 153 | if (end > root->end) |
154 | return root; | 154 | return root; |
155 | p = &root->child; | 155 | p = &root->child; |
156 | for (;;) { | 156 | for (;;) { |
157 | tmp = *p; | 157 | tmp = *p; |
158 | if (!tmp || tmp->start > end) { | 158 | if (!tmp || tmp->start > end) { |
159 | new->sibling = tmp; | 159 | new->sibling = tmp; |
160 | *p = new; | 160 | *p = new; |
161 | new->parent = root; | 161 | new->parent = root; |
162 | return NULL; | 162 | return NULL; |
163 | } | 163 | } |
164 | p = &tmp->sibling; | 164 | p = &tmp->sibling; |
165 | if (tmp->end < start) | 165 | if (tmp->end < start) |
166 | continue; | 166 | continue; |
167 | return tmp; | 167 | return tmp; |
168 | } | 168 | } |
169 | } | 169 | } |
170 | 170 | ||
171 | static int __release_resource(struct resource *old) | 171 | static int __release_resource(struct resource *old) |
172 | { | 172 | { |
173 | struct resource *tmp, **p; | 173 | struct resource *tmp, **p; |
174 | 174 | ||
175 | p = &old->parent->child; | 175 | p = &old->parent->child; |
176 | for (;;) { | 176 | for (;;) { |
177 | tmp = *p; | 177 | tmp = *p; |
178 | if (!tmp) | 178 | if (!tmp) |
179 | break; | 179 | break; |
180 | if (tmp == old) { | 180 | if (tmp == old) { |
181 | *p = tmp->sibling; | 181 | *p = tmp->sibling; |
182 | old->parent = NULL; | 182 | old->parent = NULL; |
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | p = &tmp->sibling; | 185 | p = &tmp->sibling; |
186 | } | 186 | } |
187 | return -EINVAL; | 187 | return -EINVAL; |
188 | } | 188 | } |
189 | 189 | ||
190 | /** | 190 | /** |
191 | * request_resource - request and reserve an I/O or memory resource | 191 | * request_resource - request and reserve an I/O or memory resource |
192 | * @root: root resource descriptor | 192 | * @root: root resource descriptor |
193 | * @new: resource descriptor desired by caller | 193 | * @new: resource descriptor desired by caller |
194 | * | 194 | * |
195 | * Returns 0 for success, negative error code on error. | 195 | * Returns 0 for success, negative error code on error. |
196 | */ | 196 | */ |
197 | int request_resource(struct resource *root, struct resource *new) | 197 | int request_resource(struct resource *root, struct resource *new) |
198 | { | 198 | { |
199 | struct resource *conflict; | 199 | struct resource *conflict; |
200 | 200 | ||
201 | write_lock(&resource_lock); | 201 | write_lock(&resource_lock); |
202 | conflict = __request_resource(root, new); | 202 | conflict = __request_resource(root, new); |
203 | write_unlock(&resource_lock); | 203 | write_unlock(&resource_lock); |
204 | return conflict ? -EBUSY : 0; | 204 | return conflict ? -EBUSY : 0; |
205 | } | 205 | } |
206 | 206 | ||
207 | EXPORT_SYMBOL(request_resource); | 207 | EXPORT_SYMBOL(request_resource); |
208 | 208 | ||
209 | /** | 209 | /** |
210 | * release_resource - release a previously reserved resource | 210 | * release_resource - release a previously reserved resource |
211 | * @old: resource pointer | 211 | * @old: resource pointer |
212 | */ | 212 | */ |
213 | int release_resource(struct resource *old) | 213 | int release_resource(struct resource *old) |
214 | { | 214 | { |
215 | int retval; | 215 | int retval; |
216 | 216 | ||
217 | write_lock(&resource_lock); | 217 | write_lock(&resource_lock); |
218 | retval = __release_resource(old); | 218 | retval = __release_resource(old); |
219 | write_unlock(&resource_lock); | 219 | write_unlock(&resource_lock); |
220 | return retval; | 220 | return retval; |
221 | } | 221 | } |
222 | 222 | ||
223 | EXPORT_SYMBOL(release_resource); | 223 | EXPORT_SYMBOL(release_resource); |
224 | 224 | ||
225 | #if defined(CONFIG_MEMORY_HOTPLUG) && !defined(CONFIG_ARCH_HAS_WALK_MEMORY) | 225 | #if defined(CONFIG_MEMORY_HOTPLUG) && !defined(CONFIG_ARCH_HAS_WALK_MEMORY) |
226 | /* | 226 | /* |
227 | * Finds the lowest memory reosurce exists within [res->start.res->end) | 227 | * Finds the lowest memory reosurce exists within [res->start.res->end) |
228 | * the caller must specify res->start, res->end, res->flags. | 228 | * the caller must specify res->start, res->end, res->flags. |
229 | * If found, returns 0, res is overwritten, if not found, returns -1. | 229 | * If found, returns 0, res is overwritten, if not found, returns -1. |
230 | */ | 230 | */ |
231 | static int find_next_system_ram(struct resource *res) | 231 | static int find_next_system_ram(struct resource *res) |
232 | { | 232 | { |
233 | resource_size_t start, end; | 233 | resource_size_t start, end; |
234 | struct resource *p; | 234 | struct resource *p; |
235 | 235 | ||
236 | BUG_ON(!res); | 236 | BUG_ON(!res); |
237 | 237 | ||
238 | start = res->start; | 238 | start = res->start; |
239 | end = res->end; | 239 | end = res->end; |
240 | BUG_ON(start >= end); | 240 | BUG_ON(start >= end); |
241 | 241 | ||
242 | read_lock(&resource_lock); | 242 | read_lock(&resource_lock); |
243 | for (p = iomem_resource.child; p ; p = p->sibling) { | 243 | for (p = iomem_resource.child; p ; p = p->sibling) { |
244 | /* system ram is just marked as IORESOURCE_MEM */ | 244 | /* system ram is just marked as IORESOURCE_MEM */ |
245 | if (p->flags != res->flags) | 245 | if (p->flags != res->flags) |
246 | continue; | 246 | continue; |
247 | if (p->start > end) { | 247 | if (p->start > end) { |
248 | p = NULL; | 248 | p = NULL; |
249 | break; | 249 | break; |
250 | } | 250 | } |
251 | if ((p->end >= start) && (p->start < end)) | 251 | if ((p->end >= start) && (p->start < end)) |
252 | break; | 252 | break; |
253 | } | 253 | } |
254 | read_unlock(&resource_lock); | 254 | read_unlock(&resource_lock); |
255 | if (!p) | 255 | if (!p) |
256 | return -1; | 256 | return -1; |
257 | /* copy data */ | 257 | /* copy data */ |
258 | if (res->start < p->start) | 258 | if (res->start < p->start) |
259 | res->start = p->start; | 259 | res->start = p->start; |
260 | if (res->end > p->end) | 260 | if (res->end > p->end) |
261 | res->end = p->end; | 261 | res->end = p->end; |
262 | return 0; | 262 | return 0; |
263 | } | 263 | } |
264 | int | 264 | int |
265 | walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, | 265 | walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg, |
266 | int (*func)(unsigned long, unsigned long, void *)) | 266 | int (*func)(unsigned long, unsigned long, void *)) |
267 | { | 267 | { |
268 | struct resource res; | 268 | struct resource res; |
269 | unsigned long pfn, len; | 269 | unsigned long pfn, len; |
270 | u64 orig_end; | 270 | u64 orig_end; |
271 | int ret = -1; | 271 | int ret = -1; |
272 | res.start = (u64) start_pfn << PAGE_SHIFT; | 272 | res.start = (u64) start_pfn << PAGE_SHIFT; |
273 | res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; | 273 | res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; |
274 | res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 274 | res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
275 | orig_end = res.end; | 275 | orig_end = res.end; |
276 | while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) { | 276 | while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) { |
277 | pfn = (unsigned long)(res.start >> PAGE_SHIFT); | 277 | pfn = (unsigned long)(res.start >> PAGE_SHIFT); |
278 | len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT); | 278 | len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT); |
279 | ret = (*func)(pfn, len, arg); | 279 | ret = (*func)(pfn, len, arg); |
280 | if (ret) | 280 | if (ret) |
281 | break; | 281 | break; |
282 | res.start = res.end + 1; | 282 | res.start = res.end + 1; |
283 | res.end = orig_end; | 283 | res.end = orig_end; |
284 | } | 284 | } |
285 | return ret; | 285 | return ret; |
286 | } | 286 | } |
287 | 287 | ||
288 | #endif | 288 | #endif |
289 | 289 | ||
/*
 * Find empty slot in the resource tree given range and alignment.
 * On success the gap is written into @new (start/end set); the resource
 * is NOT linked into the tree.  Caller must hold resource_lock.
 */
static int find_resource(struct resource *root, struct resource *new,
			resource_size_t size, resource_size_t min,
			resource_size_t max, resource_size_t align,
			void (*alignf)(void *, struct resource *,
					resource_size_t, resource_size_t),
			void *alignf_data)
{
	struct resource *this = root->child;

	new->start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to new->end below would cause an underflow.
	 */
	if (this && this->start == 0) {
		new->start = this->end + 1;
		this = this->sibling;
	}
	for(;;) {
		/* candidate gap ends just before the next child, or at root->end */
		if (this)
			new->end = this->start - 1;
		else
			new->end = root->end;
		/* clip the gap to the caller's [min, max] window */
		if (new->start < min)
			new->start = min;
		if (new->end > max)
			new->end = max;
		new->start = ALIGN(new->start, align);
		/* let the caller impose additional placement constraints */
		if (alignf)
			alignf(alignf_data, new, size, align);
		if (new->start < new->end && new->end - new->start >= size - 1) {
			/* gap fits: trim it down to exactly @size bytes */
			new->end = new->start + size - 1;
			return 0;
		}
		if (!this)
			break;
		/* gap too small: resume searching after this child */
		new->start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}
334 | 334 | ||
335 | /** | 335 | /** |
336 | * allocate_resource - allocate empty slot in the resource tree given range & alignment | 336 | * allocate_resource - allocate empty slot in the resource tree given range & alignment |
337 | * @root: root resource descriptor | 337 | * @root: root resource descriptor |
338 | * @new: resource descriptor desired by caller | 338 | * @new: resource descriptor desired by caller |
339 | * @size: requested resource region size | 339 | * @size: requested resource region size |
340 | * @min: minimum size to allocate | 340 | * @min: minimum size to allocate |
341 | * @max: maximum size to allocate | 341 | * @max: maximum size to allocate |
342 | * @align: alignment requested, in bytes | 342 | * @align: alignment requested, in bytes |
343 | * @alignf: alignment function, optional, called if not NULL | 343 | * @alignf: alignment function, optional, called if not NULL |
344 | * @alignf_data: arbitrary data to pass to the @alignf function | 344 | * @alignf_data: arbitrary data to pass to the @alignf function |
345 | */ | 345 | */ |
346 | int allocate_resource(struct resource *root, struct resource *new, | 346 | int allocate_resource(struct resource *root, struct resource *new, |
347 | resource_size_t size, resource_size_t min, | 347 | resource_size_t size, resource_size_t min, |
348 | resource_size_t max, resource_size_t align, | 348 | resource_size_t max, resource_size_t align, |
349 | void (*alignf)(void *, struct resource *, | 349 | void (*alignf)(void *, struct resource *, |
350 | resource_size_t, resource_size_t), | 350 | resource_size_t, resource_size_t), |
351 | void *alignf_data) | 351 | void *alignf_data) |
352 | { | 352 | { |
353 | int err; | 353 | int err; |
354 | 354 | ||
355 | write_lock(&resource_lock); | 355 | write_lock(&resource_lock); |
356 | err = find_resource(root, new, size, min, max, align, alignf, alignf_data); | 356 | err = find_resource(root, new, size, min, max, align, alignf, alignf_data); |
357 | if (err >= 0 && __request_resource(root, new)) | 357 | if (err >= 0 && __request_resource(root, new)) |
358 | err = -EBUSY; | 358 | err = -EBUSY; |
359 | write_unlock(&resource_lock); | 359 | write_unlock(&resource_lock); |
360 | return err; | 360 | return err; |
361 | } | 361 | } |
362 | 362 | ||
363 | EXPORT_SYMBOL(allocate_resource); | 363 | EXPORT_SYMBOL(allocate_resource); |
364 | 364 | ||
/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	/* Descend to the level where "new" belongs. */
	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			/* no conflict: __request_resource() linked "new" in */
			return first;

		/* conflicting with the would-be parent itself is unfixable */
		if (first == parent)
			return first;

		/*
		 * If the conflict does not fully contain "new" (and is not an
		 * exact match), "new" may instead have to adopt one or more
		 * existing siblings as children: stop descending.
		 */
		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	/*
	 * Scan the run of siblings starting at "first" that fall inside
	 * "new"; these will become "new"'s children.
	 */
	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	/* splice "new" in: it adopts first..next and inherits next's sibling */
	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	/* terminate the adopted run and reparent each member */
	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	/* re-point whichever link used to reach "first" at "new" */
	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}
415 | 415 | ||
416 | /** | 416 | /** |
417 | * insert_resource - Inserts a resource in the resource tree | 417 | * insert_resource - Inserts a resource in the resource tree |
418 | * @parent: parent of the new resource | 418 | * @parent: parent of the new resource |
419 | * @new: new resource to insert | 419 | * @new: new resource to insert |
420 | * | 420 | * |
421 | * Returns 0 on success, -EBUSY if the resource can't be inserted. | 421 | * Returns 0 on success, -EBUSY if the resource can't be inserted. |
422 | * | 422 | * |
423 | * This function is equivalent to request_resource when no conflict | 423 | * This function is equivalent to request_resource when no conflict |
424 | * happens. If a conflict happens, and the conflicting resources | 424 | * happens. If a conflict happens, and the conflicting resources |
425 | * entirely fit within the range of the new resource, then the new | 425 | * entirely fit within the range of the new resource, then the new |
426 | * resource is inserted and the conflicting resources become children of | 426 | * resource is inserted and the conflicting resources become children of |
427 | * the new resource. | 427 | * the new resource. |
428 | */ | 428 | */ |
429 | int insert_resource(struct resource *parent, struct resource *new) | 429 | int insert_resource(struct resource *parent, struct resource *new) |
430 | { | 430 | { |
431 | struct resource *conflict; | 431 | struct resource *conflict; |
432 | 432 | ||
433 | write_lock(&resource_lock); | 433 | write_lock(&resource_lock); |
434 | conflict = __insert_resource(parent, new); | 434 | conflict = __insert_resource(parent, new); |
435 | write_unlock(&resource_lock); | 435 | write_unlock(&resource_lock); |
436 | return conflict ? -EBUSY : 0; | 436 | return conflict ? -EBUSY : 0; |
437 | } | 437 | } |
438 | 438 | ||
/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	/* already linked into a tree: nothing to do */
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		/* conflicting with the root itself cannot be fixed by growing */
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
472 | 472 | ||
/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments. Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	/* new range must stay within the parent */
	if ((start < parent->start) || (end > parent->end))
		goto out;

	/* new range must still contain every existing child */
	for (tmp = res->child; tmp; tmp = tmp->sibling) {
		if ((tmp->start < start) || (tmp->end > end))
			goto out;
	}

	/* must not grow forward into the next sibling */
	if (res->sibling && (res->sibling->start <= end))
		goto out;

	/* must not grow backward over the preceding sibling (if any) */
	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

	res->start = start;
	res->end = end;
	result = 0;

 out:
	write_unlock(&resource_lock);
	return result;
}
517 | } | 517 | } |
518 | 518 | ||
519 | static void __init __reserve_region_with_split(struct resource *root, | ||
520 | resource_size_t start, resource_size_t end, | ||
521 | const char *name) | ||
522 | { | ||
523 | struct resource *parent = root; | ||
524 | struct resource *conflict; | ||
525 | struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); | ||
526 | |||
527 | if (!res) | ||
528 | return; | ||
529 | |||
530 | res->name = name; | ||
531 | res->start = start; | ||
532 | res->end = end; | ||
533 | res->flags = IORESOURCE_BUSY; | ||
534 | |||
535 | for (;;) { | ||
536 | conflict = __request_resource(parent, res); | ||
537 | if (!conflict) | ||
538 | break; | ||
539 | if (conflict != parent) { | ||
540 | parent = conflict; | ||
541 | if (!(conflict->flags & IORESOURCE_BUSY)) | ||
542 | continue; | ||
543 | } | ||
544 | |||
545 | /* Uhhuh, that didn't work out.. */ | ||
546 | kfree(res); | ||
547 | res = NULL; | ||
548 | break; | ||
549 | } | ||
550 | |||
551 | if (!res) { | ||
552 | printk(KERN_DEBUG " __reserve_region_with_split: (%s) [%llx, %llx], res: (%s) [%llx, %llx]\n", | ||
553 | conflict->name, conflict->start, conflict->end, | ||
554 | name, start, end); | ||
555 | |||
556 | /* failed, split and try again */ | ||
557 | |||
558 | /* conflict coverred whole area */ | ||
559 | if (conflict->start <= start && conflict->end >= end) | ||
560 | return; | ||
561 | |||
562 | if (conflict->start > start) | ||
563 | __reserve_region_with_split(root, start, conflict->start-1, name); | ||
564 | if (!(conflict->flags & IORESOURCE_BUSY)) { | ||
565 | resource_size_t common_start, common_end; | ||
566 | |||
567 | common_start = max(conflict->start, start); | ||
568 | common_end = min(conflict->end, end); | ||
569 | if (common_start < common_end) | ||
570 | __reserve_region_with_split(root, common_start, common_end, name); | ||
571 | } | ||
572 | if (conflict->end < end) | ||
573 | __reserve_region_with_split(root, conflict->end+1, end, name); | ||
574 | } | ||
575 | |||
576 | } | ||
577 | |||
/**
 * reserve_region_with_split - reserve a region, splitting around conflicts
 * @root: root resource descriptor
 * @start: first byte of the region to reserve
 * @end: last byte of the region to reserve
 * @name: reserving caller's ID string
 *
 * Reserve [@start, @end] under @root as busy.  Unlike a plain request,
 * a conflict with an existing resource does not abort the whole
 * reservation: the conflicting portions are skipped and the remaining
 * gaps are reserved piecewise (see __reserve_region_with_split()).
 */
void reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	write_lock(&resource_lock);
	__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/*
 * NOTE(review): this export belongs to adjust_resource() above; the
 * reserve_region_with_split() code was inserted between that function
 * and its EXPORT_SYMBOL().  Harmless, but worth moving back some day.
 */
EXPORT_SYMBOL(adjust_resource);
520 | 588 | ||
521 | /** | 589 | /** |
522 | * resource_alignment - calculate resource's alignment | 590 | * resource_alignment - calculate resource's alignment |
523 | * @res: resource pointer | 591 | * @res: resource pointer |
524 | * | 592 | * |
525 | * Returns alignment on success, 0 (invalid alignment) on failure. | 593 | * Returns alignment on success, 0 (invalid alignment) on failure. |
526 | */ | 594 | */ |
527 | resource_size_t resource_alignment(struct resource *res) | 595 | resource_size_t resource_alignment(struct resource *res) |
528 | { | 596 | { |
529 | switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { | 597 | switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { |
530 | case IORESOURCE_SIZEALIGN: | 598 | case IORESOURCE_SIZEALIGN: |
531 | return resource_size(res); | 599 | return resource_size(res); |
532 | case IORESOURCE_STARTALIGN: | 600 | case IORESOURCE_STARTALIGN: |
533 | return res->start; | 601 | return res->start; |
534 | default: | 602 | default: |
535 | return 0; | 603 | return 0; |
536 | } | 604 | } |
537 | } | 605 | } |
538 | 606 | ||
539 | /* | 607 | /* |
540 | * This is compatibility stuff for IO resources. | 608 | * This is compatibility stuff for IO resources. |
541 | * | 609 | * |
542 | * Note how this, unlike the above, knows about | 610 | * Note how this, unlike the above, knows about |
543 | * the IO flag meanings (busy etc). | 611 | * the IO flag meanings (busy etc). |
544 | * | 612 | * |
545 | * request_region creates a new busy region. | 613 | * request_region creates a new busy region. |
546 | * | 614 | * |
547 | * check_region returns non-zero if the area is already busy. | 615 | * check_region returns non-zero if the area is already busy. |
548 | * | 616 | * |
549 | * release_region releases a matching busy region. | 617 | * release_region releases a matching busy region. |
550 | */ | 618 | */ |
551 | 619 | ||
/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 *
 * Returns the new resource on success, NULL on allocation failure or
 * if the region conflicts with an already-busy resource.
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name)
{
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (res) {
		res->name = name;
		res->start = start;
		res->end = start + n - 1;
		res->flags = IORESOURCE_BUSY;

		write_lock(&resource_lock);

		for (;;) {
			struct resource *conflict;

			conflict = __request_resource(parent, res);
			if (!conflict)
				break;
			if (conflict != parent) {
				/* retry one level down, inside the conflict */
				parent = conflict;
				/* only non-busy conflicts may be nested into */
				if (!(conflict->flags & IORESOURCE_BUSY))
					continue;
			}

			/* Uhhuh, that didn't work out.. */
			kfree(res);
			res = NULL;
			break;
		}
		write_unlock(&resource_lock);
	}
	return res;
}
EXPORT_SYMBOL(__request_region);
595 | 663 | ||
596 | /** | 664 | /** |
597 | * __check_region - check if a resource region is busy or free | 665 | * __check_region - check if a resource region is busy or free |
598 | * @parent: parent resource descriptor | 666 | * @parent: parent resource descriptor |
599 | * @start: resource start address | 667 | * @start: resource start address |
600 | * @n: resource region size | 668 | * @n: resource region size |
601 | * | 669 | * |
602 | * Returns 0 if the region is free at the moment it is checked, | 670 | * Returns 0 if the region is free at the moment it is checked, |
603 | * returns %-EBUSY if the region is busy. | 671 | * returns %-EBUSY if the region is busy. |
604 | * | 672 | * |
605 | * NOTE: | 673 | * NOTE: |
606 | * This function is deprecated because its use is racy. | 674 | * This function is deprecated because its use is racy. |
607 | * Even if it returns 0, a subsequent call to request_region() | 675 | * Even if it returns 0, a subsequent call to request_region() |
608 | * may fail because another driver etc. just allocated the region. | 676 | * may fail because another driver etc. just allocated the region. |
609 | * Do NOT use it. It will be removed from the kernel. | 677 | * Do NOT use it. It will be removed from the kernel. |
610 | */ | 678 | */ |
611 | int __check_region(struct resource *parent, resource_size_t start, | 679 | int __check_region(struct resource *parent, resource_size_t start, |
612 | resource_size_t n) | 680 | resource_size_t n) |
613 | { | 681 | { |
614 | struct resource * res; | 682 | struct resource * res; |
615 | 683 | ||
616 | res = __request_region(parent, start, n, "check-region"); | 684 | res = __request_region(parent, start, n, "check-region"); |
617 | if (!res) | 685 | if (!res) |
618 | return -EBUSY; | 686 | return -EBUSY; |
619 | 687 | ||
620 | release_resource(res); | 688 | release_resource(res); |
621 | kfree(res); | 689 | kfree(res); |
622 | return 0; | 690 | return 0; |
623 | } | 691 | } |
624 | EXPORT_SYMBOL(__check_region); | 692 | EXPORT_SYMBOL(__check_region); |
625 | 693 | ||
/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
			resource_size_t n)
{
	struct resource **p;	/* link (child or sibling pointer) being walked */
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			/* descend through a non-busy resource containing the range */
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			/* a busy region must match exactly to be released */
			if (res->start != start || res->end != end)
				break;
			/* unlink while locked, then free after dropping the lock */
			*p = res->sibling;
			write_unlock(&resource_lock);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);
672 | 740 | ||
/*
 * Managed region resource
 */
/* devres payload: records a requested region so it can be auto-released */
struct region_devres {
	struct resource *parent;	/* tree the region was requested from */
	resource_size_t start;		/* first byte of the region */
	resource_size_t n;		/* region size in bytes */
};
681 | 749 | ||
/* devres destructor: release the recorded region when the device goes away */
static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}
688 | 756 | ||
689 | static int devm_region_match(struct device *dev, void *res, void *match_data) | 757 | static int devm_region_match(struct device *dev, void *res, void *match_data) |
690 | { | 758 | { |
691 | struct region_devres *this = res, *match = match_data; | 759 | struct region_devres *this = res, *match = match_data; |
692 | 760 | ||
693 | return this->parent == match->parent && | 761 | return this->parent == match->parent && |
694 | this->start == match->start && this->n == match->n; | 762 | this->start == match->start && this->n == match->n; |
695 | } | 763 | } |
696 | 764 | ||
697 | struct resource * __devm_request_region(struct device *dev, | 765 | struct resource * __devm_request_region(struct device *dev, |
698 | struct resource *parent, resource_size_t start, | 766 | struct resource *parent, resource_size_t start, |
699 | resource_size_t n, const char *name) | 767 | resource_size_t n, const char *name) |
700 | { | 768 | { |
701 | struct region_devres *dr = NULL; | 769 | struct region_devres *dr = NULL; |
702 | struct resource *res; | 770 | struct resource *res; |
703 | 771 | ||
704 | dr = devres_alloc(devm_region_release, sizeof(struct region_devres), | 772 | dr = devres_alloc(devm_region_release, sizeof(struct region_devres), |
705 | GFP_KERNEL); | 773 | GFP_KERNEL); |
706 | if (!dr) | 774 | if (!dr) |
707 | return NULL; | 775 | return NULL; |
708 | 776 | ||
709 | dr->parent = parent; | 777 | dr->parent = parent; |
710 | dr->start = start; | 778 | dr->start = start; |
711 | dr->n = n; | 779 | dr->n = n; |
712 | 780 | ||
713 | res = __request_region(parent, start, n, name); | 781 | res = __request_region(parent, start, n, name); |
714 | if (res) | 782 | if (res) |
715 | devres_add(dev, dr); | 783 | devres_add(dev, dr); |
716 | else | 784 | else |
717 | devres_free(dr); | 785 | devres_free(dr); |
718 | 786 | ||
719 | return res; | 787 | return res; |
720 | } | 788 | } |
721 | EXPORT_SYMBOL(__devm_request_region); | 789 | EXPORT_SYMBOL(__devm_request_region); |
722 | 790 | ||
/*
 * Managed counterpart of __release_region(): releases the region and
 * destroys the matching devres entry so the destructor won't run again
 * at device detach.
 */
void __devm_release_region(struct device *dev, struct resource *parent,
			resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	/* warn if nothing matched: the region wasn't devm-requested */
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
				&match_data));
}
EXPORT_SYMBOL(__devm_release_region);
733 | 801 | ||
/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	/* static: slots persist across multiple "reserve=" options */
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	/* parse "reserve=<start>,<size>[,<start>,<size>...]" pairs */
	for (;;) {
		int io_start, io_num;
		int x = reserved;

		/* need a start value with more to follow, then a size */
		if (get_option (&str, &io_start) != 2)
			break;
		if (get_option (&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			/* addresses >= 64K cannot be I/O ports: treat as iomem */
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;	/* consume the slot only on success */
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);
766 | 834 |