Blame view

include/linux/memory_hotplug.h 5.04 KB
208d54e55   Dave Hansen   [PATCH] memory ho...
1
2
3
4
5
  #ifndef __LINUX_MEMORY_HOTPLUG_H
  #define __LINUX_MEMORY_HOTPLUG_H
  
  #include <linux/mmzone.h>
  #include <linux/spinlock.h>
3947be196   Dave Hansen   [PATCH] memory ho...
6
7
  #include <linux/mmzone.h>
  #include <linux/notifier.h>
208d54e55   Dave Hansen   [PATCH] memory ho...
8

78679302f   KAMEZAWA Hiroyuki   [PATCH] memory-ho...
9
10
11
  struct page;
  struct zone;
  struct pglist_data;
208d54e55   Dave Hansen   [PATCH] memory ho...
12
13
14
15
16
17
18
19
20
21
22
23
  #ifdef CONFIG_MEMORY_HOTPLUG
  /*
   * pgdat resizing functions
   */
/*
 * pgdat_resize_lock - take this node's size lock, disabling local IRQs.
 * The saved IRQ state is stored through @flags for the matching
 * pgdat_resize_unlock() call.
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
  static inline
  void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
  {
bdc8cb984   Dave Hansen   [PATCH] memory ho...
24
  	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
208d54e55   Dave Hansen   [PATCH] memory ho...
25
26
27
28
29
30
  }
/* Initialise the spinlock guarding this node's size/span fields. */
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
bdc8cb984   Dave Hansen   [PATCH] memory ho...
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
  /*
   * Zone resizing functions
   */
/* Begin a read-side critical section over the zone's span seqlock. */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
/* Non-zero if the span changed since zone_span_seqbegin(); retry the read. */
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
/* Writer-side lock for updating the zone's span (start/size). */
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
/* Initialise the seqlock protecting the zone's span. */
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
3947be196   Dave Hansen   [PATCH] memory ho...
54
55
56
57
58
59
  extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
  extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
  extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
  /* need some defines for these for archs that don't support it */
  extern void online_page(struct page *page);
  /* VM interface that may be used by firmware interface */
3947be196   Dave Hansen   [PATCH] memory ho...
60
61
62
63
64
  extern int online_pages(unsigned long, unsigned long);
  
  /* reasonably generic interface to expand the physical pages in a zone  */
  extern int __add_pages(struct zone *zone, unsigned long start_pfn,
  	unsigned long nr_pages);
bc02af93d   Yasunori Goto   [PATCH] pgdat all...
65
66
67
68
69
70
71
72
73
  
  #ifdef CONFIG_NUMA
  extern int memory_add_physaddr_to_nid(u64 start);
  #else
/* Without NUMA every physical address maps to node 0. */
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
  #endif
306d6cbe8   Yasunori Goto   [PATCH] pgdat all...
74
75
76
77
78
79
80
81
82
83
84
  #ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
  /*
   * For supporting node-hotadd, we have to allocate a new pgdat.
   *
   * If an arch has generic style NODE_DATA(),
   * node_data[nid] = kzalloc() works well. But it depends on the architecture.
   *
   * In general, generic_alloc_nodedata() is used.
   * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
   *
   */
dd0932d9d   Yasunori Goto   [PATCH] pgdat all...
85
86
  extern pg_data_t *arch_alloc_nodedata(int nid);
  extern void arch_free_nodedata(pg_data_t *pgdat);
7049027c6   Yasunori Goto   [PATCH] pgdat all...
87
  extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
306d6cbe8   Yasunori Goto   [PATCH] pgdat all...
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
  
  #else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
  
  #define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
  #define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)
  
  #ifdef CONFIG_NUMA
  /*
   * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
   * XXX: kmalloc_node() can't work well to get new node's memory at this time.
   *	Because, pgdat for the new node is not allocated/initialized yet itself.
   *	To use new node's memory, more consideration will be necessary.
   */
  #define generic_alloc_nodedata(nid)				\
  ({								\
  	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
  })
  /*
   * This definition is just for error path in node hotadd.
   * For node hotremove, we have to replace this.
   */
  #define generic_free_nodedata(pgdat)	kfree(pgdat)
10ad400b4   Yasunori Goto   [PATCH] pgdat all...
110
111
112
113
114
  extern pg_data_t *node_data[];
/* Publish the newly allocated pgdat in the generic node_data[] table. */
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}
306d6cbe8   Yasunori Goto   [PATCH] pgdat all...
115
116
117
118
119
120
121
122
123
124
125
  #else /* !CONFIG_NUMA */
  
  /* never called */
/* Never reached on !NUMA: node hot-add makes no sense with a single node. */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
/* No-op counterpart of the stub above. */
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
10ad400b4   Yasunori Goto   [PATCH] pgdat all...
126
127
128
/* Nothing to publish on !NUMA: there is no node_data[] table. */
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
306d6cbe8   Yasunori Goto   [PATCH] pgdat all...
129
130
  #endif /* CONFIG_NUMA */
  #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
208d54e55   Dave Hansen   [PATCH] memory ho...
131
132
133
134
135
136
137
  #else /* ! CONFIG_MEMORY_HOTPLUG */
  /*
   * Stub functions for when hotplug is off
   */
  static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
  static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
  static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
bdc8cb984   Dave Hansen   [PATCH] memory ho...
138
139
140
141
142
143
144
145
146
147
148
149
  
  static inline unsigned zone_span_seqbegin(struct zone *zone)
  {
  	return 0;
  }
  static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
  {
  	return 0;
  }
  static inline void zone_span_writelock(struct zone *zone) {}
  static inline void zone_span_writeunlock(struct zone *zone) {}
  static inline void zone_seqlock_init(struct zone *zone) {}
3947be196   Dave Hansen   [PATCH] memory ho...
150
151
152
153
154
155
156
157
  
  static inline int mhp_notimplemented(const char *func)
  {
  	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled
  ", func);
  	dump_stack();
  	return -ENOSYS;
  }
bdc8cb984   Dave Hansen   [PATCH] memory ho...
158
  #endif /* ! CONFIG_MEMORY_HOTPLUG */
3947be196   Dave Hansen   [PATCH] memory ho...
159
160
161
162
163
164
165
166
  static inline int __remove_pages(struct zone *zone, unsigned long start_pfn,
  	unsigned long nr_pages)
  {
  	printk(KERN_WARNING "%s() called, not yet supported
  ", __FUNCTION__);
  	dump_stack();
  	return -ENOSYS;
  }
9d99aaa31   Andi Kleen   [PATCH] x86_64: S...
167

bc02af93d   Yasunori Goto   [PATCH] pgdat all...
168
169
  extern int add_memory(int nid, u64 start, u64 size);
  extern int arch_add_memory(int nid, u64 start, u64 size);
9d99aaa31   Andi Kleen   [PATCH] x86_64: S...
170
  extern int remove_memory(u64 start, u64 size);
f28c5edc0   Keith Mannthey   [PATCH] hot-add-m...
171
172
  extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
  								int nr_pages);
9d99aaa31   Andi Kleen   [PATCH] x86_64: S...
173

208d54e55   Dave Hansen   [PATCH] memory ho...
174
  #endif /* __LINUX_MEMORY_HOTPLUG_H */