arch/x86/include/asm/dma.h

  /*
   * linux/include/asm/dma.h: Defines for using and allocating dma channels.
   * Written by Hennus Bergman, 1992.
   * High DMA channel support & info by Hannu Savolainen
   * and John Boyd, Nov. 1992.
   */
  #ifndef _ASM_X86_DMA_H
  #define _ASM_X86_DMA_H
  
  #include <linux/spinlock.h>	/* And spinlocks */
  #include <asm/io.h>		/* need byte IO */

  #ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
  #define dma_outb	outb_p
  #else
  #define dma_outb	outb
  #endif
  
  #define dma_inb		inb
  
  /*
   * NOTES about DMA transfers:
   *
   *  controller 1: channels 0-3, byte operations, ports 00-1F
   *  controller 2: channels 4-7, word operations, ports C0-DF
   *
   *  - ALL registers are 8 bits only, regardless of transfer size
   *  - channel 4 is not used - cascades 1 into 2.
   *  - channels 0-3 are byte - addresses/counts are for physical bytes
   *  - channels 5-7 are word - addresses/counts are for physical words
   *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
   *  - transfer count loaded to registers is 1 less than actual count
   *  - controller 2 offsets are all even (2x offsets for controller 1)
   *  - page registers for 5-7 don't use data bit 0, represent 128K pages
   *  - page registers for 0-3 use bit 0, represent 64K pages
   *
   * DMA transfers are limited to the lower 16MB of _physical_ memory.
   * Note that addresses loaded into registers must be _physical_ addresses,
   * not logical addresses (which may differ if paging is active).
   *
   *  Address mapping for channels 0-3:
   *
   *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
   *    |  ...  |   |  ... |   |  ... |
   *    |  ...  |   |  ... |   |  ... |
   *    |  ...  |   |  ... |   |  ... |
   *   P7  ...  P0  A7 ... A0  A7 ... A0
   * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
   *
   *  Address mapping for channels 5-7:
   *
   *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
   *    |  ...  |   \   \   ... \  \  \  ... \  \
   *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
   *    |  ...  |     \   \   ... \  \  \  ... \
   *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0
   * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
   *
   * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
   * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
   * the hardware level, so odd-byte transfers aren't possible).
   *
   * Transfer count (_not # bytes_) is limited to 64K, represented as actual
   * count - 1 : 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always 1 or more,
   * and up to 128K bytes may be transferred on channels 5-7 in one operation.
   *
   */
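
  /*
   * Worked example (illustrative, not part of the original header): a
   * 4096-byte buffer at physical address 0x123456 (below 16MB, word-aligned,
   * and crossing no 64K/128K boundary).
   *
   *  Channel 2 (byte):  page = 0x12, addr MSB = 0x34, addr LSB = 0x56,
   *                     count register = 4096 - 1 = 0x0FFF
   *  Channel 5 (word):  page = 0x12 (bit 0 unused), word address = addr >> 1,
   *                     so addr MSB = 0x1A, addr LSB = 0x2B,
   *                     count register = 4096/2 - 1 = 0x07FF
   */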
  
  #define MAX_DMA_CHANNELS	8
  /* 16MB ISA DMA zone */
  #define MAX_DMA_PFN   ((16 * 1024 * 1024) >> PAGE_SHIFT)
  /* 4GB broken PCI/AGP hardware bus master zone */
  #define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
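
  /*
   * With 4 KiB pages (PAGE_SHIFT == 12, the x86 value) these work out to
   * MAX_DMA_PFN == 0x1000 (the PFN of the 16MB boundary) and
   * MAX_DMA32_PFN == 0x100000 (the PFN of the 4GB boundary).
   */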

  #ifdef CONFIG_X86_32
  /* The maximum address to which we can perform a DMA transfer on this platform */
  #define MAX_DMA_ADDRESS      (PAGE_OFFSET + 0x1000000)
  #else
  /* Compat define for old dma zone */
  #define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
  #endif
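
  /*
   * Illustrative note: both branches name the first address above the ISA DMA
   * window.  On 32-bit with the common 3G/1G split (PAGE_OFFSET of
   * 0xC0000000) MAX_DMA_ADDRESS evaluates to 0xC1000000; on 64-bit it is the
   * direct-mapped (__va) address of the 16MB boundary.
   */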
  
  /* 8237 DMA controllers */
  #define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
  #define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */
  
  /* DMA controller registers */
  #define DMA1_CMD_REG		0x08	/* command register (w) */
  #define DMA1_STAT_REG		0x08	/* status register (r) */
  #define DMA1_REQ_REG		0x09    /* request register (w) */
  #define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
  #define DMA1_MODE_REG		0x0B	/* mode register (w) */
  #define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
  #define DMA1_TEMP_REG		0x0D    /* Temporary Register (r) */
  #define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
  #define DMA1_CLR_MASK_REG       0x0E    /* Clear Mask */
  #define DMA1_MASK_ALL_REG       0x0F    /* all-channels mask (w) */
  
  #define DMA2_CMD_REG		0xD0	/* command register (w) */
  #define DMA2_STAT_REG		0xD0	/* status register (r) */
  #define DMA2_REQ_REG		0xD2    /* request register (w) */
  #define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
  #define DMA2_MODE_REG		0xD6	/* mode register (w) */
  #define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
  #define DMA2_TEMP_REG		0xDA    /* Temporary Register (r) */
  #define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
  #define DMA2_CLR_MASK_REG       0xDC    /* Clear Mask */
  #define DMA2_MASK_ALL_REG       0xDE    /* all-channels mask (w) */
  
  #define DMA_ADDR_0		0x00    /* DMA address registers */
  #define DMA_ADDR_1		0x02
  #define DMA_ADDR_2		0x04
  #define DMA_ADDR_3		0x06
  #define DMA_ADDR_4		0xC0
  #define DMA_ADDR_5		0xC4
  #define DMA_ADDR_6		0xC8
  #define DMA_ADDR_7		0xCC
  
  #define DMA_CNT_0		0x01    /* DMA count registers */
  #define DMA_CNT_1		0x03
  #define DMA_CNT_2		0x05
  #define DMA_CNT_3		0x07
  #define DMA_CNT_4		0xC2
  #define DMA_CNT_5		0xC6
  #define DMA_CNT_6		0xCA
  #define DMA_CNT_7		0xCE
  
  #define DMA_PAGE_0		0x87    /* DMA page registers */
  #define DMA_PAGE_1		0x83
  #define DMA_PAGE_2		0x81
  #define DMA_PAGE_3		0x82
  #define DMA_PAGE_5		0x8B
  #define DMA_PAGE_6		0x89
  #define DMA_PAGE_7		0x8A
  
  /* I/O to memory, no autoinit, increment, single mode */
  #define DMA_MODE_READ		0x44
  /* memory to I/O, no autoinit, increment, single mode */
  #define DMA_MODE_WRITE		0x48
  /* pass thru DREQ->HRQ, DACK<-HLDA only */
  #define DMA_MODE_CASCADE	0xC0
  
  #define DMA_AUTOINIT		0x10
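
  /*
   * Illustrative breakdown (per the 8237A datasheet, not spelled out here):
   * in the mode byte, bits 1:0 select the channel (filled in by
   * set_dma_mode() below), bits 3:2 the transfer type, bit 4 autoinitialize,
   * bit 5 address decrement, bits 7:6 the mode.  Thus DMA_MODE_READ (0x44)
   * is single mode, address increment, I/O-to-memory transfer;
   * DMA_MODE_WRITE (0x48) is the memory-to-I/O direction; DMA_AUTOINIT
   * (0x10) may be ORed in to reload address/count at terminal count.
   */
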
  #ifdef CONFIG_ISA_DMA_API
  extern spinlock_t  dma_spin_lock;
  static inline unsigned long claim_dma_lock(void)
  {
  	unsigned long flags;
  	spin_lock_irqsave(&dma_spin_lock, flags);
  	return flags;
  }
  static inline void release_dma_lock(unsigned long flags)
  {
  	spin_unlock_irqrestore(&dma_spin_lock, flags);
  }
  #endif /* CONFIG_ISA_DMA_API */
  
  /* enable/disable a specific DMA channel */
  static inline void enable_dma(unsigned int dmanr)
  {
  	if (dmanr <= 3)
  		dma_outb(dmanr, DMA1_MASK_REG);
  	else
  		dma_outb(dmanr & 3, DMA2_MASK_REG);
  }
  static inline void disable_dma(unsigned int dmanr)
  {
  	if (dmanr <= 3)
  		dma_outb(dmanr | 4, DMA1_MASK_REG);
  	else
  		dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
  }
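
  /*
   * Example (illustrative): the single mask register takes the channel in
   * bits 1:0 and the mask bit in bit 2, so enable_dma(6) writes 0x02 to
   * DMA2_MASK_REG (unmask channel 6) and disable_dma(6) writes 0x06 (mask).
   */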
  
  /* Clear the 'DMA Pointer Flip Flop'.
   * Write 0 for LSB/MSB, 1 for MSB/LSB access.
   * Use this once to initialize the FF to a known state.
   * After that, keep track of it. :-)
   * --- In order to do that, the DMA routines below should ---
   * --- only be used while holding the DMA lock ! ---
   */
  static inline void clear_dma_ff(unsigned int dmanr)
  {
  	if (dmanr <= 3)
  		dma_outb(0, DMA1_CLEAR_FF_REG);
  	else
  		dma_outb(0, DMA2_CLEAR_FF_REG);
  }
  
  /* set mode (above) for a specific DMA channel */
  static inline void set_dma_mode(unsigned int dmanr, char mode)
  {
  	if (dmanr <= 3)
  		dma_outb(mode | dmanr, DMA1_MODE_REG);
  	else
  		dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
  }
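
  /* Example (illustrative): set_dma_mode(3, DMA_MODE_WRITE) writes
   * 0x48 | 3 = 0x4B to DMA1_MODE_REG. */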
  
  /* Set only the page register bits of the transfer address.
   * This is used for successive transfers when we know the contents of
   * the lower 16 bits of the DMA current address register, but a 64k boundary
   * may have been crossed.
   */
  static inline void set_dma_page(unsigned int dmanr, char pagenr)
  {
  	switch (dmanr) {
  	case 0:
  		dma_outb(pagenr, DMA_PAGE_0);
  		break;
  	case 1:
  		dma_outb(pagenr, DMA_PAGE_1);
  		break;
  	case 2:
  		dma_outb(pagenr, DMA_PAGE_2);
  		break;
  	case 3:
  		dma_outb(pagenr, DMA_PAGE_3);
  		break;
  	case 5:
  		dma_outb(pagenr & 0xfe, DMA_PAGE_5);
  		break;
  	case 6:
  		dma_outb(pagenr & 0xfe, DMA_PAGE_6);
  		break;
  	case 7:
  		dma_outb(pagenr & 0xfe, DMA_PAGE_7);
  		break;
  	}
  }
  
  
  /* Set transfer address & page bits for specific DMA channel.
   * Assumes dma flipflop is clear.
   */
  static inline void set_dma_addr(unsigned int dmanr, unsigned int a)
  {
  	set_dma_page(dmanr, a>>16);
  	if (dmanr <= 3)  {
  		dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
  		dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
  	}  else  {
  		dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
  		dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
  	}
  }
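
  /*
   * Example (illustrative), continuing the 0x123456 buffer from the notes
   * above: set_dma_addr(5, 0x123456) writes page 0x12 to DMA_PAGE_5 (0x8B),
   * then 0x2B and 0x1A to port 0xC4 -- the low and high bytes of the
   * 16-bit word address 0x1A2B.
   */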
  
  
  /* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
   * a specific DMA channel.
   * You must ensure the parameters are valid.
   * NOTE: from a manual: "the number of transfers is one more
   * than the initial word count"! This is taken into account.
   * Assumes dma flip-flop is clear.
   * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
   */
  static inline void set_dma_count(unsigned int dmanr, unsigned int count)
  {
  	count--;
  	if (dmanr <= 3)  {
  		dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
  		dma_outb((count >> 8) & 0xff,
  			 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
  	} else {
  		dma_outb((count >> 1) & 0xff,
  			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
  		dma_outb((count >> 9) & 0xff,
  			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
  	}
  }
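
  /*
   * Example (illustrative): set_dma_count(5, 4096) programs 2048 words as
   * the hardware value 0x07FF (count - 1), writing 0xFF then 0x07 to port
   * 0xC6, the channel 5 count register.
   */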
  
  
  /* Get DMA residue count. After a DMA transfer, this
   * should return zero. Reading this while a DMA transfer is
   * still in progress will return unpredictable results.
   * If called before the channel has been used, it may return 1.
   * Otherwise, it returns the number of _bytes_ left to transfer.
   *
   * Assumes DMA flip-flop is clear.
   */
  static inline int get_dma_residue(unsigned int dmanr)
  {
  	unsigned int io_port;
  	/* using short to get 16-bit wrap around */
  	unsigned short count;
  
  	io_port = (dmanr <= 3) ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
  		: ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
  
  	count = 1 + dma_inb(io_port);
  	count += dma_inb(io_port) << 8;
  
  	return (dmanr <= 3) ? count : (count << 1);
  }
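
  /*
   * Example (illustrative): after a completed transfer the 8237 count
   * register typically reads back 0xFFFF (it decrements through zero at
   * terminal count), so count becomes 1 + 0xFF + 0xFF00 == 0x10000, which
   * the unsigned short truncates to 0 -- the "should return zero" case.
   */
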
  /* These are in kernel/dma.c because x86 uses CONFIG_GENERIC_ISA_DMA */
  #ifdef CONFIG_ISA_DMA_API
  extern int request_dma(unsigned int dmanr, const char *device_id);
  extern void free_dma(unsigned int dmanr);
  #endif
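
  /*
   * Illustrative sketch (not part of the original header): the canonical
   * sequence for programming one ISA DMA transfer with the helpers above.
   * The helper name and parameters are examples only; the caller is assumed
   * to have obtained the channel with request_dma() and to pass a physical
   * address below 16MB that does not cross a 64K (128K for ch 5-7) boundary.
   */
  #ifdef CONFIG_ISA_DMA_API
  static inline void isa_dma_program_sketch(unsigned int dmanr,
  					  unsigned int phys, unsigned int bytes)
  {
  	unsigned long flags = claim_dma_lock();	/* serialize 8237 access */
  
  	disable_dma(dmanr);			/* mask the channel first */
  	clear_dma_ff(dmanr);			/* flip-flop to a known state */
  	set_dma_mode(dmanr, DMA_MODE_READ);	/* device -> memory */
  	set_dma_addr(dmanr, phys);		/* page + 16-bit address */
  	set_dma_count(dmanr, bytes);		/* bytes; even for ch 5-7 */
  	enable_dma(dmanr);			/* unmask: transfer may start */
  
  	release_dma_lock(flags);
  }
  #endif /* CONFIG_ISA_DMA_API */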
  
  /* From PCI */
  
  #ifdef CONFIG_PCI
  extern int isa_dma_bridge_buggy;
  #else
  #define isa_dma_bridge_buggy	(0)
  #endif

  #endif /* _ASM_X86_DMA_H */