Blame view

drivers/scsi/mvumi.c 69.2 KB
873e65bc0   Thomas Gleixner   treewide: Replace...
1
  // SPDX-License-Identifier: GPL-2.0-only
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2
3
4
5
  /*
   * Marvell UMI driver
   *
   * Copyright 2011 Marvell. <jyli@marvell.com>
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
6
7
8
9
10
11
12
13
14
15
16
17
  */
  
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/moduleparam.h>
  #include <linux/init.h>
  #include <linux/device.h>
  #include <linux/pci.h>
  #include <linux/list.h>
  #include <linux/spinlock.h>
  #include <linux/interrupt.h>
  #include <linux/delay.h>
36f8ef7f7   Tina Ruchandani   mvumi: 64bit valu...
18
  #include <linux/ktime.h>
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
19
20
21
22
  #include <linux/blkdev.h>
  #include <linux/io.h>
  #include <scsi/scsi.h>
  #include <scsi/scsi_cmnd.h>
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
23
  #include <scsi/scsi_device.h>
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
24
25
26
27
  #include <scsi/scsi_host.h>
  #include <scsi/scsi_transport.h>
  #include <scsi/scsi_eh.h>
  #include <linux/uaccess.h>
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
28
  #include <linux/kthread.h>
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
29
30
31
32
33
34
  
  #include "mvumi.h"
  
  MODULE_LICENSE("GPL");
  MODULE_AUTHOR("jyli@marvell.com");
  MODULE_DESCRIPTION("Marvell UMI Driver");
9baa3c34a   Benoit Taine   PCI: Remove DEFIN...
35
  static const struct pci_device_id mvumi_pci_table[] = {
c85bcadc7   Myron Stowe   [SCSI] mvumi: Use...
36
37
  	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
  	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
  	{ 0 }
  };
  
  MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
  
  static void tag_init(struct mvumi_tag *st, unsigned short size)
  {
  	unsigned short i;
  	BUG_ON(size != st->size);
  	st->top = size;
  	for (i = 0; i < size; i++)
  		st->stack[i] = size - 1 - i;
  }
  
  static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
  {
  	BUG_ON(st->top <= 0);
  	return st->stack[--st->top];
  }
  
  static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
  							unsigned short tag)
  {
  	BUG_ON(st->top >= st->size);
  	st->stack[st->top++] = tag;
  }
  
  static bool tag_is_empty(struct mvumi_tag *st)
  {
  	if (st->top == 0)
  		return 1;
  	else
  		return 0;
  }
  
  static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
  {
  	int i;
  
  	for (i = 0; i < MAX_BASE_ADDRESS; i++)
  		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
  								addr_array[i])
  			pci_iounmap(dev, addr_array[i]);
  }
  
  static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
  {
  	int i;
  
  	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
  		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
  			addr_array[i] = pci_iomap(dev, i, 0);
  			if (!addr_array[i]) {
  				dev_err(&dev->dev, "failed to map Bar[%d]
  ",
  									i);
  				mvumi_unmap_pci_addr(dev, addr_array);
  				return -ENOMEM;
  			}
  		} else
  			addr_array[i] = NULL;
  
  		dev_dbg(&dev->dev, "Bar %d : %p.
  ", i, addr_array[i]);
  	}
  
  	return 0;
  }
  
  static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
  				enum resource_type type, unsigned int size)
  {
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
110
  	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
111
112
113
  
  	if (!res) {
  		dev_err(&mhba->pdev->dev,
59e13d483   Masanari Iida   scsi: fix various...
114
115
  			"Failed to allocate memory for resource manager.
  ");
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
116
117
118
119
120
  		return NULL;
  	}
  
  	switch (type) {
  	case RESOURCE_CACHED_MEMORY:
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
121
  		res->virt_addr = kzalloc(size, GFP_ATOMIC);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
122
123
124
125
126
127
128
129
130
131
132
  		if (!res->virt_addr) {
  			dev_err(&mhba->pdev->dev,
  				"unable to allocate memory,size = %d.
  ", size);
  			kfree(res);
  			return NULL;
  		}
  		break;
  
  	case RESOURCE_UNCACHED_MEMORY:
  		size = round_up(size, 8);
750afb08c   Luis Chamberlain   cross-tree: phase...
133
134
135
  		res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
  						    &res->bus_addr,
  						    GFP_KERNEL);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
136
137
138
139
140
141
142
143
  		if (!res->virt_addr) {
  			dev_err(&mhba->pdev->dev,
  					"unable to allocate consistent mem,"
  							"size = %d.
  ", size);
  			kfree(res);
  			return NULL;
  		}
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
  		break;
  
  	default:
  		dev_err(&mhba->pdev->dev, "unknown resource type %d.
  ", type);
  		kfree(res);
  		return NULL;
  	}
  
  	res->type = type;
  	res->size = size;
  	INIT_LIST_HEAD(&res->entry);
  	list_add_tail(&res->entry, &mhba->res_list);
  
  	return res;
  }
  
  static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
  {
  	struct mvumi_res *res, *tmp;
  
  	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
  		switch (res->type) {
  		case RESOURCE_UNCACHED_MEMORY:
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
168
  			dma_free_coherent(&mhba->pdev->dev, res->size,
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
  						res->virt_addr, res->bus_addr);
  			break;
  		case RESOURCE_CACHED_MEMORY:
  			kfree(res->virt_addr);
  			break;
  		default:
  			dev_err(&mhba->pdev->dev,
  				"unknown resource type %d
  ", res->type);
  			break;
  		}
  		list_del(&res->entry);
  		kfree(res);
  	}
  	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
  }
  
  /**
   * mvumi_make_sgl -	Prepares  SGL
   * @mhba:		Adapter soft state
   * @scmd:		SCSI command from the mid-layer
   * @sgl_p:		SGL to be filled in
   * @sg_count		return the number of SG elements
   *
   * If successful, this function returns 0. otherwise, it returns -1.
   */
  static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
  					void *sgl_p, unsigned char *sg_count)
  {
  	struct scatterlist *sg;
  	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
  	unsigned int i;
  	unsigned int sgnum = scsi_sg_count(scmd);
  	dma_addr_t busaddr;
3c1a30df6   Ming Lei   scsi: mvumi: use ...
203
  	*sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
204
  			       scmd->sc_data_direction);
4bd13a077   Alexey Khoroshilov   scsi: mvumi: remo...
205
206
207
208
209
  	if (*sg_count > mhba->max_sge) {
  		dev_err(&mhba->pdev->dev,
  			"sg count[0x%x] is bigger than max sg[0x%x].
  ",
  			*sg_count, mhba->max_sge);
3c1a30df6   Ming Lei   scsi: mvumi: use ...
210
  		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
211
  			     scmd->sc_data_direction);
4bd13a077   Alexey Khoroshilov   scsi: mvumi: remo...
212
213
  		return -1;
  	}
3c1a30df6   Ming Lei   scsi: mvumi: use ...
214
215
  	scsi_for_each_sg(scmd, sg, *sg_count, i) {
  		busaddr = sg_dma_address(sg);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
216
217
  		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
  		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
4bd13a077   Alexey Khoroshilov   scsi: mvumi: remo...
218
  		m_sg->flags = 0;
3c1a30df6   Ming Lei   scsi: mvumi: use ...
219
  		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
4bd13a077   Alexey Khoroshilov   scsi: mvumi: remo...
220
221
222
223
  		if ((i + 1) == *sg_count)
  			m_sg->flags |= 1U << mhba->eot_flag;
  
  		sgd_inc(mhba, m_sg);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
224
225
226
227
228
229
230
231
232
233
234
235
236
237
  	}
  
  	return 0;
  }
  
  static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
  							unsigned int size)
  {
  	struct mvumi_sgl *m_sg;
  	void *virt_addr;
  	dma_addr_t phy_addr;
  
  	if (size == 0)
  		return 0;
750afb08c   Luis Chamberlain   cross-tree: phase...
238
239
  	virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
  				       GFP_KERNEL);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
240
241
  	if (!virt_addr)
  		return -1;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
242
243
244
245
246
247
  	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
  	cmd->frame->sg_counts = 1;
  	cmd->data_buf = virt_addr;
  
  	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
  	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
248
249
  	m_sg->flags = 1U << mhba->eot_flag;
  	sgd_setsz(mhba, m_sg, cpu_to_le32(size));
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
  
  	return 0;
  }
  
  static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
  				unsigned int buf_size)
  {
  	struct mvumi_cmd *cmd;
  
  	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  	if (!cmd) {
  		dev_err(&mhba->pdev->dev, "failed to create a internal cmd
  ");
  		return NULL;
  	}
  	INIT_LIST_HEAD(&cmd->queue_pointer);
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
266
267
  	cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
  			&cmd->frame_phys, GFP_KERNEL);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
268
269
270
271
272
273
274
275
276
277
278
279
280
  	if (!cmd->frame) {
  		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
  			" frame,size = %d.
  ", mhba->ib_max_size);
  		kfree(cmd);
  		return NULL;
  	}
  
  	if (buf_size) {
  		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
  			dev_err(&mhba->pdev->dev, "failed to allocate memory"
  						" for internal frame
  ");
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
281
  			dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
282
  					cmd->frame, cmd->frame_phys);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
  			kfree(cmd);
  			return NULL;
  		}
  	} else
  		cmd->frame->sg_counts = 0;
  
  	return cmd;
  }
  
  static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
  						struct mvumi_cmd *cmd)
  {
  	struct mvumi_sgl *m_sg;
  	unsigned int size;
  	dma_addr_t phy_addr;
  
  	if (cmd && cmd->frame) {
  		if (cmd->frame->sg_counts) {
  			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
302
  			sgd_getsz(mhba, m_sg, size);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
303
304
305
  
  			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
  				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
306
  			dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
307
308
  								phy_addr);
  		}
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
309
  		dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
310
  				cmd->frame, cmd->frame_phys);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
  		kfree(cmd);
  	}
  }
  
  /**
   * mvumi_get_cmd -	Get a command from the free pool
   * @mhba:		Adapter soft state
   *
   * Returns a free command from the pool
   */
  static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
  {
  	struct mvumi_cmd *cmd = NULL;
  
  	if (likely(!list_empty(&mhba->cmd_pool))) {
  		cmd = list_entry((&mhba->cmd_pool)->next,
  				struct mvumi_cmd, queue_pointer);
  		list_del_init(&cmd->queue_pointer);
  	} else
  		dev_warn(&mhba->pdev->dev, "command pool is empty!
  ");
  
  	return cmd;
  }
  
  /**
   * mvumi_return_cmd -	Return a cmd to free command pool
   * @mhba:		Adapter soft state
   * @cmd:		Command packet to be returned to free command pool
   */
  static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
  						struct mvumi_cmd *cmd)
  {
  	cmd->scmd = NULL;
  	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
  }
  
  /**
   * mvumi_free_cmds -	Free all the cmds in the free cmd pool
   * @mhba:		Adapter soft state
   */
  static void mvumi_free_cmds(struct mvumi_hba *mhba)
  {
  	struct mvumi_cmd *cmd;
  
  	while (!list_empty(&mhba->cmd_pool)) {
  		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
  							queue_pointer);
  		list_del(&cmd->queue_pointer);
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
360
361
  		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
  			kfree(cmd->frame);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
  		kfree(cmd);
  	}
  }
  
  /**
   * mvumi_alloc_cmds -	Allocates the command packets
   * @mhba:		Adapter soft state
   *
   */
  static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
  {
  	int i;
  	struct mvumi_cmd *cmd;
  
  	for (i = 0; i < mhba->max_io; i++) {
  		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  		if (!cmd)
  			goto err_exit;
  
  		INIT_LIST_HEAD(&cmd->queue_pointer);
  		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
383
384
385
386
387
388
  		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
  			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
  			cmd->frame_phys = mhba->ib_frame_phys
  						+ i * mhba->ib_max_size;
  		} else
  			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
389
390
391
392
393
394
395
396
397
398
399
400
401
  		if (!cmd->frame)
  			goto err_exit;
  	}
  	return 0;
  
  err_exit:
  	dev_err(&mhba->pdev->dev,
  			"failed to allocate memory for cmd[0x%x].
  ", i);
  	while (!list_empty(&mhba->cmd_pool)) {
  		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
  						queue_pointer);
  		list_del(&cmd->queue_pointer);
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
402
403
  		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
  			kfree(cmd->frame);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
404
405
406
407
  		kfree(cmd);
  	}
  	return -ENOMEM;
  }
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
408
  static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
409
  {
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
410
411
412
413
  	unsigned int ib_rp_reg;
  	struct mvumi_hw_regs *regs = mhba->regs;
  
  	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
414

bd756ddea   Shun Fu   [SCSI] mvumi: Add...
415
416
417
418
419
420
421
422
  	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
  			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
  			((ib_rp_reg & regs->cl_pointer_toggle)
  			 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
  		dev_warn(&mhba->pdev->dev, "no free slot to use.
  ");
  		return 0;
  	}
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
423
424
425
  	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
  		dev_warn(&mhba->pdev->dev, "firmware io overflow.
  ");
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
426
427
428
  		return 0;
  	} else {
  		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
429
  	}
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
430
  }
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
431

bd756ddea   Shun Fu   [SCSI] mvumi: Add...
432
433
434
435
436
437
438
439
440
441
442
443
444
445
  static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
  {
  	unsigned int count;
  	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
  		return 0;
  	count = ioread32(mhba->ib_shadow);
  	if (count == 0xffff)
  		return 0;
  	return count;
  }
  
  static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
  {
  	unsigned int cur_ib_entry;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
446

bd756ddea   Shun Fu   [SCSI] mvumi: Add...
447
  	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
448
449
450
  	cur_ib_entry++;
  	if (cur_ib_entry >= mhba->list_num_io) {
  		cur_ib_entry -= mhba->list_num_io;
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
451
452
453
454
455
456
457
458
459
  		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
  	}
  	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
  	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
  	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
  		*ib_entry = mhba->ib_list + cur_ib_entry *
  				sizeof(struct mvumi_dyn_list_entry);
  	} else {
  		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
460
  	}
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
461
  	atomic_inc(&mhba->fw_outstanding);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
462
463
464
465
  }
  
  static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
  {
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
466
467
  	iowrite32(0xffff, mhba->ib_shadow);
  	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
  }
  
  static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
  		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
  {
  	unsigned short tag, request_id;
  
  	udelay(1);
  	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
  	request_id = p_outb_frame->request_id;
  	tag = p_outb_frame->tag;
  	if (tag > mhba->tag_pool.size) {
  		dev_err(&mhba->pdev->dev, "ob frame data error
  ");
  		return -1;
  	}
  	if (mhba->tag_cmd[tag] == NULL) {
  		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command
  ", tag);
  		return -1;
  	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
  						mhba->request_id_enabled) {
  			dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
  					"cmd request ID:0x%x
  ", request_id,
  					mhba->tag_cmd[tag]->request_id);
  			return -1;
  	}
  
  	return 0;
  }
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
499
500
  static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
  			unsigned int *cur_obf, unsigned int *assign_obf_end)
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
501
  {
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
502
503
  	unsigned int ob_write, ob_write_shadow;
  	struct mvumi_hw_regs *regs = mhba->regs;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
504
505
  
  	do {
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
506
507
508
  		ob_write = ioread32(regs->outb_copy_pointer);
  		ob_write_shadow = ioread32(mhba->ob_shadow);
  	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
509

bd756ddea   Shun Fu   [SCSI] mvumi: Add...
510
511
  	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
  	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
512

bd756ddea   Shun Fu   [SCSI] mvumi: Add...
513
514
515
  	if ((ob_write & regs->cl_pointer_toggle) !=
  			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
  		*assign_obf_end += mhba->list_num_io;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
516
  	}
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
  	return 0;
  }
  
  static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
  			unsigned int *cur_obf, unsigned int *assign_obf_end)
  {
  	unsigned int ob_write;
  	struct mvumi_hw_regs *regs = mhba->regs;
  
  	ob_write = ioread32(regs->outb_read_pointer);
  	ob_write = ioread32(regs->outb_copy_pointer);
  	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
  	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
  	if (*assign_obf_end < *cur_obf)
  		*assign_obf_end += mhba->list_num_io;
  	else if (*assign_obf_end == *cur_obf)
  		return -1;
  	return 0;
  }
  
  static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
  {
  	unsigned int cur_obf, assign_obf_end, i;
  	struct mvumi_ob_data *ob_data;
  	struct mvumi_rsp_frame *p_outb_frame;
  	struct mvumi_hw_regs *regs = mhba->regs;
  
  	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
  		return;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
546
547
548
549
550
  
  	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
  		cur_obf++;
  		if (cur_obf >= mhba->list_num_io) {
  			cur_obf -= mhba->list_num_io;
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
551
  			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
  		}
  
  		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
  
  		/* Copy pointer may point to entry in outbound list
  		*  before entry has valid data
  		*/
  		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
  			mhba->tag_cmd[p_outb_frame->tag] == NULL ||
  			p_outb_frame->request_id !=
  				mhba->tag_cmd[p_outb_frame->tag]->request_id))
  			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
  				continue;
  
  		if (!list_empty(&mhba->ob_data_list)) {
  			ob_data = (struct mvumi_ob_data *)
  				list_first_entry(&mhba->ob_data_list,
  					struct mvumi_ob_data, list);
  			list_del_init(&ob_data->list);
  		} else {
  			ob_data = NULL;
  			if (cur_obf == 0) {
  				cur_obf = mhba->list_num_io - 1;
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
575
  				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
576
577
578
579
580
581
582
583
584
585
  			} else
  				cur_obf -= 1;
  			break;
  		}
  
  		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
  		p_outb_frame->tag = 0xff;
  
  		list_add_tail(&ob_data->list, &mhba->free_ob_list);
  	}
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
586
587
588
  	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
  	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
  	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
589
  }
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
590
  static void mvumi_reset(struct mvumi_hba *mhba)
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
591
  {
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
592
593
594
595
  	struct mvumi_hw_regs *regs = mhba->regs;
  
  	iowrite32(0, regs->enpointa_mask_reg);
  	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
596
  		return;
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
597
  	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
598
599
600
601
602
603
604
  }
  
  static unsigned char mvumi_start(struct mvumi_hba *mhba);
  
  static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
  {
  	mhba->fw_state = FW_STATE_ABORT;
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
605
  	mvumi_reset(mhba);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
606
607
608
609
610
611
  
  	if (mvumi_start(mhba))
  		return FAILED;
  	else
  		return SUCCESS;
  }
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
  static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
  {
  	struct mvumi_hw_regs *regs = mhba->regs;
  	u32 tmp;
  	unsigned long before;
  	before = jiffies;
  
  	iowrite32(0, regs->enpointa_mask_reg);
  	tmp = ioread32(regs->arm_to_pciea_msg1);
  	while (tmp != HANDSHAKE_READYSTATE) {
  		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
  		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
  			dev_err(&mhba->pdev->dev,
  				"FW reset failed [0x%x].
  ", tmp);
  			return FAILED;
  		}
  
  		msleep(500);
  		rmb();
  		tmp = ioread32(regs->arm_to_pciea_msg1);
  	}
  
  	return SUCCESS;
  }
  
  static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
  {
  	unsigned char i;
  
  	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
  		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
  						&mhba->pci_base[i]);
  	}
  }
  
  static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
  {
  	unsigned char i;
  
  	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
  		if (mhba->pci_base[i])
  			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
  						mhba->pci_base[i]);
  	}
  }
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
658
  static int mvumi_pci_set_master(struct pci_dev *pdev)
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
659
  {
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
660
  	int ret = 0;
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
661
662
663
  	pci_set_master(pdev);
  
  	if (IS_DMA64) {
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
664
665
  		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
  			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
666
  	} else
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
667
  		ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
  
  	return ret;
  }
  
  static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
  {
  	mhba->fw_state = FW_STATE_ABORT;
  
  	iowrite32(0, mhba->regs->reset_enable);
  	iowrite32(0xf, mhba->regs->reset_request);
  
  	iowrite32(0x10, mhba->regs->reset_enable);
  	iowrite32(0x10, mhba->regs->reset_request);
  	msleep(100);
  	pci_disable_device(mhba->pdev);
  
  	if (pci_enable_device(mhba->pdev)) {
  		dev_err(&mhba->pdev->dev, "enable device failed
  ");
  		return FAILED;
  	}
  	if (mvumi_pci_set_master(mhba->pdev)) {
  		dev_err(&mhba->pdev->dev, "set master failed
  ");
  		return FAILED;
  	}
  	mvumi_restore_bar_addr(mhba);
  	if (mvumi_wait_for_fw(mhba) == FAILED)
  		return FAILED;
  
  	return mvumi_wait_for_outstanding(mhba);
  }
  
  static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
  {
  	return mvumi_wait_for_outstanding(mhba);
  }
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
705
706
707
708
709
  static int mvumi_host_reset(struct scsi_cmnd *scmd)
  {
  	struct mvumi_hba *mhba;
  
  	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
7df158ce6   Hannes Reinecke   scsi: mvumi: use ...
710
711
712
  	scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x
  ",
  			scmd->request->tag, scmd->cmnd[0], scmd->retries);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
713

bd756ddea   Shun Fu   [SCSI] mvumi: Add...
714
  	return mhba->instancet->reset_host(mhba);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
  }
  
  static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
  						struct mvumi_cmd *cmd)
  {
  	unsigned long flags;
  
  	cmd->cmd_status = REQ_STATUS_PENDING;
  
  	if (atomic_read(&cmd->sync_cmd)) {
  		dev_err(&mhba->pdev->dev,
  			"last blocked cmd not finished, sync_cmd = %d
  ",
  						atomic_read(&cmd->sync_cmd));
  		BUG_ON(1);
  		return -1;
  	}
  	atomic_inc(&cmd->sync_cmd);
  	spin_lock_irqsave(mhba->shost->host_lock, flags);
  	mhba->instancet->fire_cmd(mhba, cmd);
  	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
  
  	wait_event_timeout(mhba->int_cmd_wait_q,
  		(cmd->cmd_status != REQ_STATUS_PENDING),
  		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
  
  	/* command timeout */
  	if (atomic_read(&cmd->sync_cmd)) {
  		spin_lock_irqsave(mhba->shost->host_lock, flags);
  		atomic_dec(&cmd->sync_cmd);
  		if (mhba->tag_cmd[cmd->frame->tag]) {
7512ddef6   YueHaibing   scsi: mvumi: Stop...
746
  			mhba->tag_cmd[cmd->frame->tag] = NULL;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
  			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]
  ",
  							cmd->frame->tag);
  			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
  		}
  		if (!list_empty(&cmd->queue_pointer)) {
  			dev_warn(&mhba->pdev->dev,
  				"TIMEOUT:A internal command doesn't send!
  ");
  			list_del_init(&cmd->queue_pointer);
  		} else
  			atomic_dec(&mhba->fw_outstanding);
  
  		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
  	}
  	return 0;
  }
  
  static void mvumi_release_fw(struct mvumi_hba *mhba)
  {
  	mvumi_free_cmds(mhba);
  	mvumi_release_mem_resource(mhba);
  	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
770
  	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
771
772
  		mhba->handshake_page, mhba->handshake_page_phys);
  	kfree(mhba->regs);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
  	pci_release_regions(mhba->pdev);
  }
  
  static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
  {
  	struct mvumi_cmd *cmd;
  	struct mvumi_msg_frame *frame;
  	unsigned char device_id, retry = 0;
  	unsigned char bitcount = sizeof(unsigned char) * 8;
  
  	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
  		if (!(mhba->target_map[device_id / bitcount] &
  				(1 << (device_id % bitcount))))
  			continue;
  get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
  		if (!cmd) {
  			if (retry++ >= 5) {
  				dev_err(&mhba->pdev->dev, "failed to get memory"
  					" for internal flush cache cmd for "
  					"device %d", device_id);
  				retry = 0;
  				continue;
  			} else
  				goto get_cmd;
  		}
  		cmd->scmd = NULL;
  		cmd->cmd_status = REQ_STATUS_PENDING;
  		atomic_set(&cmd->sync_cmd, 0);
  		frame = cmd->frame;
  		frame->req_function = CL_FUN_SCSI_CMD;
  		frame->device_id = device_id;
  		frame->cmd_flag = CMD_FLAG_NON_DATA;
  		frame->data_transfer_length = 0;
  		frame->cdb_length = MAX_COMMAND_SIZE;
  		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
  		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
809
  		frame->cdb[1] = CDB_CORE_MODULE;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
  		frame->cdb[2] = CDB_CORE_SHUTDOWN;
  
  		mvumi_issue_blocked_cmd(mhba, cmd);
  		if (cmd->cmd_status != SAM_STAT_GOOD) {
  			dev_err(&mhba->pdev->dev,
  				"device %d flush cache failed, status=0x%x.
  ",
  				device_id, cmd->cmd_status);
  		}
  
  		mvumi_delete_internal_cmd(mhba, cmd);
  	}
  	return 0;
  }
  
  static unsigned char
  mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
  							unsigned short len)
  {
  	unsigned char *ptr;
  	unsigned char ret = 0, i;
  
  	ptr = (unsigned char *) p_header->frame_content;
  	for (i = 0; i < len; i++) {
  		ret ^= *ptr;
  		ptr++;
  	}
  
  	return ret;
  }
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
840
/*
 * mvumi_hs_build_page - fill in one handshake page to send to firmware.
 * @mhba:	Adapter soft state
 * @hs_header:	page buffer; page_code selects which page layout to build
 *
 * Each page is built in place: frame_length and content are set, then the
 * header checksum is recomputed over the content.
 */
static void mvumi_hs_build_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page2 *hs_page2;
	struct mvumi_hs_page4 *hs_page4;
	struct mvumi_hs_page3 *hs_page3;
	u64 time;
	u64 local_time;

	switch (hs_header->page_code) {
	case HS_PAGE_HOST_INFO:
		/* host information page: OS type/version and wall time */
		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
		/* frame_length excludes the 4 header bytes */
		hs_header->frame_length = sizeof(*hs_page2) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page2->host_type = 3; /* 3 mean linux*/
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
			hs_page2->host_cap = 0x08;/* host dynamic source mode */
		hs_page2->host_ver.ver_major = VER_MAJOR;
		hs_page2->host_ver.ver_minor = VER_MINOR;
		hs_page2->host_ver.ver_oem = VER_OEM;
		hs_page2->host_ver.ver_build = VER_BUILD;
		hs_page2->system_io_bus = 0;
		hs_page2->slot_number = 0;
		hs_page2->intr_level = 0;
		hs_page2->intr_vector = 0;
		/* convert UTC seconds to local time for the firmware */
		time = ktime_get_real_seconds();
		local_time = (time - (sys_tz.tz_minuteswest * 60));
		hs_page2->seconds_since1970 = local_time;
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_FIRM_CTL:
		/* firmware control page: content intentionally all zero */
		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page3) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_CL_INFO:
		/* communication-list page: in/outbound list DMA addresses,
		 * entry sizes and queue depths */
		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page4) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);

		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
		if (mhba->hba_capability
			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
			/* new definition: depth is sent as log2 of the
			 * (power-of-two) list_num_io */
			hs_page4->ob_depth = find_first_bit((unsigned long *)
							    &mhba->list_num_io,
							    BITS_PER_LONG);
			hs_page4->ib_depth = find_first_bit((unsigned long *)
							    &mhba->list_num_io,
							    BITS_PER_LONG);
		} else {
			hs_page4->ob_depth = (u8) mhba->list_num_io;
			hs_page4->ib_depth = (u8) mhba->list_num_io;
		}
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	default:
		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
			hs_header->page_code);
		break;
	}
}
  
/**
 * mvumi_init_data -	Initialize requested date for FW
 * @mhba:			Adapter soft state
 *
 * Carves the uncached DMA region into inbound list, optional dynamic-source
 * frame area, inbound/outbound shadow counters and outbound list, honouring
 * each area's alignment; then allocates cached memory for the outbound data
 * pool and the tag/target-map bookkeeping.  Idempotent: a second call after
 * MVUMI_FW_ALLOC is set returns 0 immediately.
 *
 * Returns 0 on success, -1 on allocation failure (all resources already
 * acquired are released via mvumi_release_mem_resource()).
 */
static int mvumi_init_data(struct mvumi_hba *mhba)
{
	struct mvumi_ob_data *ob_pool;
	struct mvumi_res *res_mgnt;
	unsigned int tmp_size, offset, i;
	void *virmem, *v;
	dma_addr_t p;

	if (mhba->fw_flag & MVUMI_FW_ALLOC)
		return 0;

	/* total uncached size: ib list (+dyn entries), padding for 128-byte
	 * alignment, ob list, both shadow areas and alignment slack */
	tmp_size = mhba->ib_max_size * mhba->max_io;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;

	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
	tmp_size += 8 + sizeof(u32)*2 + 16;

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_UNCACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for inbound list\n");
		goto fail_alloc_dma_buf;
	}

	p = res_mgnt->bus_addr;
	v = res_mgnt->virt_addr;
	/* ib_list: must be 128-byte aligned */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;
	mhba->ib_list = v;
	mhba->ib_list_phys = p;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		/* dynamic source mode: ib list holds descriptors, actual
		 * frames live in a separate area right after it */
		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		mhba->ib_frame = v;
		mhba->ib_frame_phys = p;
	}
	v += mhba->ib_max_size * mhba->max_io;
	p += mhba->ib_max_size * mhba->max_io;

	/* ib shadow: 8-byte aligned, two u32 slots */
	offset = round_up(p, 8) - p;
	p += offset;
	v += offset;
	mhba->ib_shadow = v;
	mhba->ib_shadow_phys = p;
	p += sizeof(u32)*2;
	v += sizeof(u32)*2;

	/* ob shadow: 9580 uses an 8-byte slot, others a 4-byte slot */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
		offset = round_up(p, 8) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 8;
		v += 8;
	} else {
		offset = round_up(p, 4) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 4;
		v += 4;
	}

	/* ob list: 128-byte aligned */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;

	mhba->ob_list = v;
	mhba->ob_list_phys = p;

	/* ob data pool: cached memory, one buffer+header per outstanding IO */
	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
	tmp_size = round_up(tmp_size, 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for outbound data buffer\n");
		goto fail_alloc_dma_buf;
	}
	virmem = res_mgnt->virt_addr;

	for (i = mhba->max_io; i != 0; i--) {
		ob_pool = (struct mvumi_ob_data *) virmem;
		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
		virmem += mhba->ob_max_size + sizeof(*ob_pool);
	}

	/* tag stack + per-tag cmd pointers + device target bitmap */
	tmp_size = sizeof(unsigned short) * mhba->max_io +
				sizeof(struct mvumi_cmd *) * mhba->max_io;
	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
						(sizeof(unsigned char) * 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for tag and target map\n");
		goto fail_alloc_dma_buf;
	}

	virmem = res_mgnt->virt_addr;
	mhba->tag_pool.stack = virmem;
	mhba->tag_pool.size = mhba->max_io;
	tag_init(&mhba->tag_pool, mhba->max_io);
	virmem += sizeof(unsigned short) * mhba->max_io;

	mhba->tag_cmd = virmem;
	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;

	mhba->target_map = virmem;

	mhba->fw_flag |= MVUMI_FW_ALLOC;
	return 0;

fail_alloc_dma_buf:
	mvumi_release_mem_resource(mhba);
	return -1;
}
  
/*
 * mvumi_hs_process_page - validate and consume a handshake page received
 * from firmware.
 * @mhba:	Adapter soft state
 * @hs_header:	page returned by the firmware
 *
 * Verifies the XOR checksum, then extracts the firmware capability page
 * (queue depths, transfer limits, entry sizes) into @mhba.
 * Returns 0 on success, -1 on checksum mismatch or unknown page code.
 */
static int mvumi_hs_process_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page1 *hs_page1;
	unsigned char page_checksum;

	page_checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
	if (page_checksum != hs_header->checksum) {
		dev_err(&mhba->pdev->dev, "checksum error\n");
		return -1;
	}

	switch (hs_header->page_code) {
	case HS_PAGE_FIRM_CAP:
		hs_page1 = (struct mvumi_hs_page1 *) hs_header;

		mhba->max_io = hs_page1->max_io_support;
		mhba->list_num_io = hs_page1->cl_inout_list_depth;
		mhba->max_transfer_size = hs_page1->max_transfer_size;
		mhba->max_target_id = hs_page1->max_devices_support;
		mhba->hba_capability = hs_page1->capability;
		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
		/* entry size fields are log2 of the size in dwords */
		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;

		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;

		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
						hs_page1->fw_ver.ver_build);

		/* eot_flag: bit position of end-of-table in SG entries,
		 * depends on whether firmware uses compact SG format */
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
			mhba->eot_flag = 22;
		else
			mhba->eot_flag = 27;
		/* new definition transmits the depth as log2 */
		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
		break;
	default:
		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
		return -1;
	}
	return 0;
}
  
/**
 * mvumi_handshake -	Move the FW to READY state
 * @mhba:				Adapter soft state
 *
 * During the initialization, FW passes can potentially be in any one of
 * several possible states. If the FW in operational, waiting-for-handshake
 * states, driver must take steps to bring it to ready state. Otherwise, it
 * has to wait for the ready state.
 *
 * Runs one step of the host<->firmware handshake state machine.  The
 * current state comes either from the driver (FW_STATE_STARTING) or from
 * the firmware's arm_to_pciea_msg0 register.  Each step posts its reply via
 * pciea_to_arm_msg0 and rings the DRBL_HANDSHAKE doorbell.
 * Returns 0 on success, -1 on handshake error.
 */
static int mvumi_handshake(struct mvumi_hba *mhba)
{
	unsigned int hs_state, tmp, hs_fun;
	struct mvumi_hs_header *hs_header;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->fw_state == FW_STATE_STARTING)
		hs_state = HS_S_START;
	else {
		/* read state/status the firmware posted */
		tmp = ioread32(regs->arm_to_pciea_msg0);
		hs_state = HS_GET_STATE(tmp);
		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
			/* firmware reported failure: restart the handshake */
			mhba->fw_state = FW_STATE_STARTING;
			return -1;
		}
	}

	hs_fun = 0;
	switch (hs_state) {
	case HS_S_START:
		/* announce ourselves and request a reset of the exchange */
		mhba->fw_state = FW_STATE_HANDSHAKING;
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_RESET);
		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_RESET:
		/* hand the firmware the DMA address of the handshake page.
		 * NOTE(review): the high half goes to arm_to_pciea_msg1 (not
		 * pciea_to_arm_msg1) — matches the hardware protocol as
		 * shipped; confirm against the register spec before changing.
		 */
		iowrite32(lower_32_bits(mhba->handshake_page_phys),
					regs->pciea_to_arm_msg1);
		iowrite32(upper_32_bits(mhba->handshake_page_phys),
					regs->arm_to_pciea_msg1);
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_PAGE_ADDR:
	case HS_S_QUERY_PAGE:
	case HS_S_SEND_PAGE:
		/* page exchange phase: alternate between consuming firmware
		 * pages and sending host pages until all pages are done */
		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
			mhba->hba_total_pages =
			((struct mvumi_hs_page1 *) hs_header)->total_pages;

			if (mhba->hba_total_pages == 0)
				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if (hs_state == HS_S_QUERY_PAGE) {
			/* firmware filled the page: parse it and allocate
			 * the communication lists it describes */
			if (mvumi_hs_process_page(mhba, hs_header)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
			if (mvumi_init_data(mhba)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
		} else if (hs_state == HS_S_PAGE_ADDR) {
			hs_header->page_code = 0;
			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
			hs_header->page_code++;
			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
				mvumi_hs_build_page(mhba, hs_header);
				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
			} else
				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
		} else
			HS_SET_STATE(hs_fun, HS_S_END);

		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_END:
		/* Set communication list ISR */
		tmp = ioread32(regs->enpointa_mask_reg);
		tmp |= regs->int_comaout | regs->int_comaerr;
		iowrite32(tmp, regs->enpointa_mask_reg);
		iowrite32(mhba->list_num_io, mhba->ib_shadow);
		/* Set InBound List Available count shadow */
		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_basel);
		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_baseh);

		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
			/* Set OutBound List Available count shadow */
			iowrite32((mhba->list_num_io-1) |
							regs->cl_pointer_toggle,
							mhba->ob_shadow);
			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_basel);
			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_baseh);
		}

		/* start both ring cursors at the last slot with the toggle
		 * bit set, so the first advance wraps to slot 0 */
		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
							regs->cl_pointer_toggle;
		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
							regs->cl_pointer_toggle;
		mhba->fw_state = FW_STATE_STARTED;

		break;
	default:
		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
								hs_state);
		return -1;
	}
	return 0;
}
  
/*
 * mvumi_handshake_event - run one handshake step and wait for the firmware
 * to raise the handshake doorbell interrupt (or for the handshake to
 * complete), polling with a FW_MAX_DELAY-second timeout.
 * @mhba:	Adapter soft state
 *
 * Returns 0 on success (fw started or doorbell seen), -1 (as unsigned char)
 * on timeout.
 */
static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
{
	unsigned int isr_status;
	unsigned long before;

	before = jiffies;
	mvumi_handshake(mhba);
	do {
		isr_status = mhba->instancet->read_fw_status_reg(mhba);

		if (mhba->fw_state == FW_STATE_STARTED)
			return 0;
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"no handshake response at state 0x%x.\n",
				  mhba->fw_state);
			dev_err(&mhba->pdev->dev,
				"isr : global=0x%x,status=0x%x.\n",
					mhba->global_isr, isr_status);
			return -1;
		}
		/* re-read firmware state before sleeping again */
		rmb();
		usleep_range(1000, 2000);
	} while (!(isr_status & DRBL_HANDSHAKE_ISR));

	return 0;
}
  
/*
 * mvumi_check_handshake - bring the firmware to the started state.
 * @mhba:	Adapter soft state
 *
 * Waits (up to FW_MAX_DELAY seconds) for the firmware to report READY or
 * DONE via arm_to_pciea_msg1, issuing MU resets while it is in neither
 * state, then drives the handshake state machine to completion.
 * Returns 0 on success, -1 (as unsigned char) on timeout or handshake
 * failure.
 */
static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
{
	unsigned int tmp;
	unsigned long before;

	before = jiffies;
	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
		/* firmware not ready yet: keep asking it to reset */
		if (tmp != HANDSHAKE_READYSTATE)
			iowrite32(DRBL_MU_RESET,
					mhba->regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"invalid signature [0x%x].\n", tmp);
			return -1;
		}
		usleep_range(1000, 2000);
		rmb();
		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	}

	mhba->fw_state = FW_STATE_STARTING;
	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
	do {
		if (mvumi_handshake_event(mhba)) {
			dev_err(&mhba->pdev->dev,
					"handshake failed at state 0x%x.\n",
						mhba->fw_state);
			return -1;
		}
	} while (mhba->fw_state != FW_STATE_STARTED);

	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");

	return 0;
}
  
/*
 * mvumi_start - prepare interrupts and start the firmware handshake.
 * @mhba:	Adapter soft state
 *
 * Acknowledges any pending doorbell, unmasks the doorbell and
 * communication-list interrupts, then runs the handshake.
 * Returns 0 on success, -1 (as unsigned char) on handshake failure.
 */
static unsigned char mvumi_start(struct mvumi_hba *mhba)
{
	unsigned int tmp;
	struct mvumi_hw_regs *regs = mhba->regs;

	/* clear Door bell */
	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);

	/* unmask doorbell and list-done interrupts before handshaking */
	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
	iowrite32(tmp, regs->enpointa_mask_reg);
	msleep(100);
	if (mvumi_check_handshake(mhba))
		return -1;

	return 0;
}
  
/**
 * mvumi_complete_cmd -	Completes a command
 * @mhba:			Adapter soft state
 * @cmd:			Command to be completed
 * @ob_frame:			Outbound response frame from the firmware
 *
 * Translates the firmware request status into a SCSI midlayer result,
 * copies sense data on CHECK CONDITION, unmaps the data buffer and hands
 * the scsi_cmnd back to the midlayer.
 */
static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	struct scsi_cmnd *scmd = cmd->scmd;

	/* break the scmd<->cmd back-pointer set at issue time */
	cmd->scmd->SCp.ptr = NULL;
	scmd->result = ob_frame->req_status;

	switch (ob_frame->req_status) {
	case SAM_STAT_GOOD:
		scmd->result |= DID_OK << 16;
		break;
	case SAM_STAT_BUSY:
		scmd->result |= DID_BUS_BUSY << 16;
		break;
	case SAM_STAT_CHECK_CONDITION:
		scmd->result |= (DID_OK << 16);
		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
				sizeof(struct mvumi_sense_data));
			scmd->result |=  (DRIVER_SENSE << 24);
		}
		break;
	default:
		scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
		break;
	}
	if (scsi_bufflen(scmd))
		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
			     scsi_sg_count(scmd),
			     scmd->sc_data_direction);
	cmd->scmd->scsi_done(scmd);
	mvumi_return_cmd(mhba, cmd);
}
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
1355

f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
  static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
  						struct mvumi_cmd *cmd,
  					struct mvumi_rsp_frame *ob_frame)
  {
  	if (atomic_read(&cmd->sync_cmd)) {
  		cmd->cmd_status = ob_frame->req_status;
  
  		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
  				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
  				cmd->data_buf) {
  			memcpy(cmd->data_buf, ob_frame->payload,
  					sizeof(struct mvumi_sense_data));
  		}
  		atomic_dec(&cmd->sync_cmd);
  		wake_up(&mhba->int_cmd_wait_q);
  	}
  }
  
  static void mvumi_show_event(struct mvumi_hba *mhba,
  			struct mvumi_driver_event *ptr)
  {
  	unsigned int i;
  
  	dev_warn(&mhba->pdev->dev,
  		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]
  ",
  		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
  	if (ptr->param_count) {
  		printk(KERN_WARNING "Event param(len 0x%x): ",
  						ptr->param_count);
  		for (i = 0; i < ptr->param_count; i++)
  			printk(KERN_WARNING "0x%x ", ptr->params[i]);
  
  		printk(KERN_WARNING "
  ");
  	}
  
  	if (ptr->sense_data_length) {
  		printk(KERN_WARNING "Event sense data(len 0x%x): ",
  						ptr->sense_data_length);
  		for (i = 0; i < ptr->sense_data_length; i++)
  			printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
  		printk(KERN_WARNING "
  ");
  	}
  }
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
  static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
  {
  	struct scsi_device *sdev;
  	int ret = -1;
  
  	if (status == DEVICE_OFFLINE) {
  		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
  		if (sdev) {
  			dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.
  ", 0,
  								sdev->id, 0);
  			scsi_remove_device(sdev);
  			scsi_device_put(sdev);
  			ret = 0;
  		} else
  			dev_err(&mhba->pdev->dev, " no disk[%d] to remove
  ",
  									devid);
  	} else if (status == DEVICE_ONLINE) {
  		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
  		if (!sdev) {
  			scsi_add_device(mhba->shost, 0, devid, 0);
  			dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.
  ", 0,
  								devid, 0);
  			ret = 0;
  		} else {
  			dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.
  ",
  								0, devid, 0);
  			scsi_device_put(sdev);
  		}
  	}
  	return ret;
  }
  
/*
 * mvumi_inquiry - probe a target with a SCSI INQUIRY and derive its WWID.
 * @mhba:	Adapter soft state
 * @id:		target id to probe
 * @cmd:	optional pre-allocated internal command; if NULL one is
 *		created here (and freed before returning)
 *
 * Returns the device WWID on success, 0 if the device did not respond or
 * no command could be allocated.  On 9143 hardware the WWID is synthesized
 * as id+1; otherwise it is read from the INQUIRY response at
 * MVUMI_INQUIRY_UUID_OFF.
 */
static u64 mvumi_inquiry(struct mvumi_hba *mhba,
	unsigned int id, struct mvumi_cmd *cmd)
{
	struct mvumi_msg_frame *frame;
	u64 wwid = 0;
	int cmd_alloc = 0;
	/* caller-supplied cmds are expected to have a 64-byte data buffer;
	 * the same size is used when allocating one here */
	int data_buf_len = 64;

	if (!cmd) {
		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
		if (cmd)
			cmd_alloc = 1;
		else
			return 0;
	} else {
		memset(cmd->data_buf, 0, data_buf_len);
	}
	cmd->scmd = NULL;
	cmd->cmd_status = REQ_STATUS_PENDING;
	atomic_set(&cmd->sync_cmd, 0);
	frame = cmd->frame;
	frame->device_id = (u16) id;
	frame->cmd_flag = CMD_FLAG_DATA_IN;
	frame->req_function = CL_FUN_SCSI_CMD;
	frame->cdb_length = 6;
	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
	memset(frame->cdb, 0, frame->cdb_length);
	frame->cdb[0] = INQUIRY;
	frame->cdb[4] = frame->data_transfer_length;

	mvumi_issue_blocked_cmd(mhba, cmd);

	if (cmd->cmd_status == SAM_STAT_GOOD) {
		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
			wwid = id + 1;
		else
			memcpy((void *)&wwid,
			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
			       MVUMI_INQUIRY_UUID_LEN);
		dev_dbg(&mhba->pdev->dev,
			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
	} else {
		wwid = 0;
	}
	if (cmd_alloc)
		mvumi_delete_internal_cmd(mhba, cmd);

	return wwid;
}
  
/*
 * mvumi_detach_devices - tear down all devices tracked by the driver.
 * @mhba:	Adapter soft state
 *
 * Removes every disk on the shost list from the SCSI midlayer and frees
 * both internal device lists; on 9580 hardware also removes the virtual
 * device that occupies the last target id.  Serialized by device_lock.
 */
static void mvumi_detach_devices(struct mvumi_hba *mhba)
{
	struct mvumi_device *mv_dev = NULL , *dev_next;
	struct scsi_device *sdev = NULL;

	mutex_lock(&mhba->device_lock);

	/* detach Hard Disk */
	list_for_each_entry_safe(mv_dev, dev_next,
		&mhba->shost_dev_list, list) {
		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
		kfree(mv_dev);
	}
	/* devices probed but never exposed to the midlayer */
	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
		kfree(mv_dev);
	}

	/* detach virtual device */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		sdev = scsi_device_lookup(mhba->shost, 0,
						mhba->max_target_id - 1, 0);

	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}

	mutex_unlock(&mhba->device_lock);
}
  
  static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
  {
  	struct scsi_device *sdev;
  
  	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
  	if (sdev) {
  		scsi_rescan_device(&sdev->sdev_gendev);
  		scsi_device_put(sdev);
  	}
  }
  
  static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
  {
  	struct mvumi_device *mv_dev = NULL;
  
  	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
  		if (mv_dev->wwid == wwid) {
  			if (mv_dev->id != id) {
  				dev_err(&mhba->pdev->dev,
  					"%s has same wwid[%llx] ,"
  					" but different id[%d %d]
  ",
  					__func__, mv_dev->wwid, mv_dev->id, id);
  				return -1;
  			} else {
  				if (mhba->pdev->device ==
  						PCI_DEVICE_ID_MARVELL_MV9143)
  					mvumi_rescan_devices(mhba, id);
  				return 1;
  			}
  		}
  	}
  	return 0;
  }
  
  static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
  {
  	struct mvumi_device *mv_dev = NULL, *dev_next;
  
  	list_for_each_entry_safe(mv_dev, dev_next,
  				&mhba->shost_dev_list, list) {
  		if (mv_dev->id == id) {
  			dev_dbg(&mhba->pdev->dev,
  				"detach device(0:%d:0) wwid(%llx) from HOST
  ",
  				mv_dev->id, mv_dev->wwid);
  			mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
  			list_del_init(&mv_dev->list);
  			kfree(mv_dev);
  		}
  	}
  }
  
  static int mvumi_probe_devices(struct mvumi_hba *mhba)
  {
  	int id, maxid;
  	u64 wwid = 0;
  	struct mvumi_device *mv_dev = NULL;
  	struct mvumi_cmd *cmd = NULL;
  	int found = 0;
  
  	cmd = mvumi_create_internal_cmd(mhba, 64);
  	if (!cmd)
  		return -1;
  
  	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
  		maxid = mhba->max_target_id;
  	else
  		maxid = mhba->max_target_id - 1;
  
  	for (id = 0; id < maxid; id++) {
  		wwid = mvumi_inquiry(mhba, id, cmd);
  		if (!wwid) {
  			/* device no response, remove it */
  			mvumi_remove_devices(mhba, id);
  		} else {
  			/* device response, add it */
  			found = mvumi_match_devices(mhba, id, wwid);
  			if (!found) {
  				mvumi_remove_devices(mhba, id);
  				mv_dev = kzalloc(sizeof(struct mvumi_device),
  								GFP_KERNEL);
  				if (!mv_dev) {
  					dev_err(&mhba->pdev->dev,
  						"%s alloc mv_dev failed
  ",
  						__func__);
  					continue;
  				}
  				mv_dev->id = id;
  				mv_dev->wwid = wwid;
  				mv_dev->sdev = NULL;
  				INIT_LIST_HEAD(&mv_dev->list);
  				list_add_tail(&mv_dev->list,
  					      &mhba->mhba_dev_list);
  				dev_dbg(&mhba->pdev->dev,
  					"probe a new device(0:%d:0)"
  					" wwid(%llx)
  ", id, mv_dev->wwid);
  			} else if (found == -1)
  				return -1;
  			else
  				continue;
  		}
  	}
  
  	if (cmd)
  		mvumi_delete_internal_cmd(mhba, cmd);
  
  	return 0;
  }
  
/*
 * mvumi_rescan_bus - kthread body that reacts to hotplug notifications.
 * @data:	struct mvumi_hba * of the adapter this thread serves
 *
 * Sleeps until pnp_count is raised by the event path, debounces for one
 * second, re-probes all devices and attaches every newly found device to
 * the SCSI midlayer.  Runs until kthread_stop().
 */
static int mvumi_rescan_bus(void *data)
{
	int ret = 0;
	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
	struct mvumi_device *mv_dev = NULL , *dev_next;

	while (!kthread_should_stop()) {

		/* block until a hotplug event bumps pnp_count */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!atomic_read(&mhba->pnp_count))
			schedule();
		/* debounce: let a burst of events coalesce into one scan */
		msleep(1000);
		atomic_set(&mhba->pnp_count, 0);
		__set_current_state(TASK_RUNNING);

		mutex_lock(&mhba->device_lock);
		ret = mvumi_probe_devices(mhba);
		if (!ret) {
			/* expose each newly probed device; move it to the
			 * shost list on success, drop it on failure */
			list_for_each_entry_safe(mv_dev, dev_next,
						 &mhba->mhba_dev_list, list) {
				if (mvumi_handle_hotplug(mhba, mv_dev->id,
							 DEVICE_ONLINE)) {
					dev_err(&mhba->pdev->dev,
						"%s add device(0:%d:0) failed"
						"wwid(%llx) has exist\n",
						__func__,
						mv_dev->id, mv_dev->wwid);
					list_del_init(&mv_dev->list);
					kfree(mv_dev);
				} else {
					list_move_tail(&mv_dev->list,
						       &mhba->shost_dev_list);
				}
			}
		}
		mutex_unlock(&mhba->device_lock);
	}
	return 0;
}
  
/*
 * mvumi_proc_msg - process a firmware hotplug bitmap message.
 * @mhba:	Adapter soft state
 * @param:	hotplug event payload; first half of bitmap lists arriving
 *		devices, second half (starting at size/8 bytes) lists
 *		removed ones.  Bits are active-low: a ZERO bit marks an
 *		affected target id.
 *
 * Only acts once the host is fully attached (MVUMI_FW_ATTACH).
 */
static void mvumi_proc_msg(struct mvumi_hba *mhba,
					struct mvumi_hotplug_event *param)
{
	u16 size = param->size;
	const unsigned long *ar_bitmap;
	const unsigned long *re_bitmap;
	int index;

	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
		index = -1;
		ar_bitmap = (const unsigned long *) param->bitmap;
		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];

		mutex_lock(&mhba->sas_discovery_mutex);
		/* attach every device whose arrival bit is clear */
		do {
			index = find_next_zero_bit(ar_bitmap, size, index + 1);
			if (index >= size)
				break;
			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
		} while (1);

		/* then detach every device whose removal bit is clear */
		index = -1;
		do {
			index = find_next_zero_bit(re_bitmap, size, index + 1);
			if (index >= size)
				break;
			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
		} while (1);
		mutex_unlock(&mhba->sas_discovery_mutex);
	}
}
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
/*
 * mvumi_notification -	dispatch a completed event-request buffer
 * @mhba:	adapter soft state
 * @msg:	APICDB1 sub-opcode the request was issued with
 * @buffer:	data buffer returned by the firmware
 *
 * APICDB1_EVENT_GETEVENT: @buffer holds a struct mvumi_event_req whose
 * events are logged one by one via mvumi_show_event().
 * APICDB1_HOST_GETEVENT: @buffer holds a hotplug bitmap handled by
 * mvumi_proc_msg(). Any other opcode is silently ignored.
 */
static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
{
	if (msg == APICDB1_EVENT_GETEVENT) {
		int i, count;
		struct mvumi_driver_event *param = NULL;
		struct mvumi_event_req *er = buffer;
		count = er->count;
		/* Reject a corrupt count before walking the array. */
		if (count > MAX_EVENTS_RETURNED) {
			dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
					" than max event count[0x%x].\n",
					count, MAX_EVENTS_RETURNED);
			return;
		}
		for (i = 0; i < count; i++) {
			param = &er->events[i];
			mvumi_show_event(mhba, param);
		}
	} else if (msg == APICDB1_HOST_GETEVENT) {
		mvumi_proc_msg(mhba, buffer);
	}
}
  
/*
 * mvumi_get_event -	synchronously fetch event data from the firmware
 * @mhba:	adapter soft state
 * @msg:	APICDB1 sub-opcode (event or host-event)
 *
 * Builds an internal DATA_IN command with CDB {APICDB0_EVENT, msg} and a
 * 512-byte buffer, issues it blocking, and on SAM_STAT_GOOD feeds the
 * response to mvumi_notification(). Returns 0, or -1 when no internal
 * command could be allocated.
 */
static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
{
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;

	cmd = mvumi_create_internal_cmd(mhba, 512);
	if (!cmd)
		return -1;
	cmd->scmd = NULL;
	cmd->cmd_status = REQ_STATUS_PENDING;
	atomic_set(&cmd->sync_cmd, 0);
	frame = cmd->frame;
	frame->device_id = 0;
	frame->cmd_flag = CMD_FLAG_DATA_IN;
	frame->req_function = CL_FUN_SCSI_CMD;
	frame->cdb_length = MAX_COMMAND_SIZE;
	frame->data_transfer_length = sizeof(struct mvumi_event_req);
	memset(frame->cdb, 0, MAX_COMMAND_SIZE);
	frame->cdb[0] = APICDB0_EVENT;
	frame->cdb[1] = msg;
	mvumi_issue_blocked_cmd(mhba, cmd);

	if (cmd->cmd_status != SAM_STAT_GOOD)
		dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
							cmd->cmd_status);
	else
		mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);

	mvumi_delete_internal_cmd(mhba, cmd);
	return 0;
}
  
  static void mvumi_scan_events(struct work_struct *work)
  {
  	struct mvumi_events_wq *mu_ev =
  		container_of(work, struct mvumi_events_wq, work_q);
  
  	mvumi_get_event(mu_ev->mhba, mu_ev->event);
  	kfree(mu_ev);
  }
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
1775
  static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
1776
1777
  {
  	struct mvumi_events_wq *mu_ev;
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
  	while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
  		if (isr_status & DRBL_BUS_CHANGE) {
  			atomic_inc(&mhba->pnp_count);
  			wake_up_process(mhba->dm_thread);
  			isr_status &= ~(DRBL_BUS_CHANGE);
  			continue;
  		}
  
  		mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
  		if (mu_ev) {
  			INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
  			mu_ev->mhba = mhba;
  			mu_ev->event = APICDB1_EVENT_GETEVENT;
  			isr_status &= ~(DRBL_EVENT_NOTIFY);
  			mu_ev->param = NULL;
  			schedule_work(&mu_ev->work_q);
  		}
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
  	}
  }
  
/*
 * mvumi_handle_clob -	complete every response on the free outbound list
 * @mhba:	adapter soft state
 *
 * For each response buffer queued on free_ob_list: recycle the buffer
 * back to ob_data_list, look up the owning command by tag, release the
 * tag and complete the command (SCSI path vs internal path). Finally
 * kick fire_cmd to push any requests that were waiting for resources.
 * Runs under shost->host_lock from the ISR.
 */
static void mvumi_handle_clob(struct mvumi_hba *mhba)
{
	struct mvumi_rsp_frame *ob_frame;
	struct mvumi_cmd *cmd;
	struct mvumi_ob_data *pool;

	while (!list_empty(&mhba->free_ob_list)) {
		pool = list_first_entry(&mhba->free_ob_list,
						struct mvumi_ob_data, list);
		list_del_init(&pool->list);
		list_add_tail(&pool->list, &mhba->ob_data_list);

		ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
		/* assumes the fw-returned tag still maps to a live command
		 * — TODO confirm a stale tag cannot reach this point */
		cmd = mhba->tag_cmd[ob_frame->tag];

		atomic_dec(&mhba->fw_outstanding);
		mhba->tag_cmd[ob_frame->tag] = NULL;
		tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
		if (cmd->scmd)
			mvumi_complete_cmd(mhba, cmd, ob_frame);
		else
			mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
	}
	/* Slots were freed above; try to launch queued requests. */
	mhba->instancet->fire_cmd(mhba, NULL);
}
  
/*
 * mvumi_isr_handler -	top-level interrupt handler
 * @irq:	IRQ number (unused)
 * @devp:	adapter soft state passed at request_irq time
 *
 * clear_intr() latches the interrupt causes into mhba->global_isr /
 * mhba->isr_status and acks the hardware; a nonzero return (or an empty
 * cause) means the interrupt was not ours. Doorbell bits drive hotplug
 * and handshake processing, the outbound-list bit drains response
 * entries, and once the firmware is started completions are delivered
 * via mvumi_handle_clob(). Everything runs under shost->host_lock.
 */
static irqreturn_t mvumi_isr_handler(int irq, void *devp)
{
	struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
	unsigned long flags;

	spin_lock_irqsave(mhba->shost->host_lock, flags);
	if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
		return IRQ_NONE;
	}
	if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
		if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
			mvumi_launch_events(mhba, mhba->isr_status);
		if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
			dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
			mvumi_handshake(mhba);
		}

	}
	if (mhba->global_isr & mhba->regs->int_comaout)
		mvumi_receive_ob_list_entry(mhba);

	/* Causes consumed; reset for the next interrupt. */
	mhba->global_isr = 0;
	mhba->isr_status = 0;
	if (mhba->fw_state == FW_STATE_STARTED)
		mvumi_handle_clob(mhba);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
	return IRQ_HANDLED;
}
  
/*
 * mvumi_send_command -	copy one command into an inbound list entry
 * @mhba:	adapter soft state
 * @cmd:	prepared command (cmd->frame filled in)
 *
 * Fails with MV_QUEUE_COMMAND_RESULT_NO_RESOURCE when the firmware is
 * not started or no tag is free; otherwise claims a tag and an inbound
 * entry, records the command in tag_cmd[] for completion lookup, and
 * either writes a dynamic-source descriptor (frame fetched by DMA from
 * cmd->frame_phys) or memcpys the frame into the entry directly.
 * The entry is made visible to the firmware later by
 * mvumi_send_ib_list_entry() in the fire_cmd path.
 */
static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	void *ib_entry;
	struct mvumi_msg_frame *ib_frame;
	unsigned int frame_len;

	ib_frame = cmd->frame;
	if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
		dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
	}
	if (tag_is_empty(&mhba->tag_pool)) {
		dev_dbg(&mhba->pdev->dev, "no free tag.\n");
		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
	}
	mvumi_get_ib_list_entry(mhba, &ib_entry);

	cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
	cmd->frame->request_id = mhba->io_seq++;
	cmd->request_id = cmd->frame->request_id;
	mhba->tag_cmd[cmd->frame->tag] = cmd;
	/* Header plus the SG entries actually used (less 4 bytes of the
	 * frame's payload placeholder). */
	frame_len = sizeof(*ib_frame) - 4 +
				ib_frame->sg_counts * sizeof(struct mvumi_sgl);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		struct mvumi_dyn_list_entry *dle;
		dle = ib_entry;
		dle->src_low_addr =
			cpu_to_le32(lower_32_bits(cmd->frame_phys));
		dle->src_high_addr =
			cpu_to_le32(upper_32_bits(cmd->frame_phys));
		/* Length field is in dwords, 12 bits wide. */
		dle->if_length = (frame_len >> 2) & 0xFFF;
	} else {
		memcpy(ib_entry, ib_frame, frame_len);
	}
	return MV_QUEUE_COMMAND_RESULT_SENT;
}
  
/*
 * mvumi_fire_cmd -	queue a command and flush the waiting list
 * @mhba:	adapter soft state
 * @cmd:	new command to enqueue, or NULL to only drain the list
 *
 * Appends @cmd (if any) to waiting_req_list, then moves as many waiting
 * commands as there are free inbound slots into the inbound list. On a
 * resource shortfall the current command is pushed back to the head of
 * the list. The hardware inbound pointer is only advanced (via
 * mvumi_send_ib_list_entry) when at least one command was placed.
 */
static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
{
	unsigned short num_of_cl_sent = 0;
	unsigned int count;
	enum mvumi_qc_result result;

	if (cmd)
		list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
	count = mhba->instancet->check_ib_list(mhba);
	if (list_empty(&mhba->waiting_req_list) || !count)
		return;

	do {
		cmd = list_first_entry(&mhba->waiting_req_list,
				       struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
		result = mvumi_send_command(mhba, cmd);
		switch (result) {
		case MV_QUEUE_COMMAND_RESULT_SENT:
			num_of_cl_sent++;
			break;
		case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
			/* Put it back at the head and flush what we have. */
			list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
			if (num_of_cl_sent > 0)
				mvumi_send_ib_list_entry(mhba);

			return;
		}
	} while (!list_empty(&mhba->waiting_req_list) && count--);
	if (num_of_cl_sent > 0)
		mvumi_send_ib_list_entry(mhba);
}
  
/**
 * mvumi_enable_intr -	Enables interrupts
 * @mhba:		Adapter soft state
 *
 * Unmasks the ARM-to-PCIe doorbell sources and adds the doorbell,
 * outbound-list and communication-error bits to the endpoint mask.
 */
static void mvumi_enable_intr(struct mvumi_hba *mhba)
{
	unsigned int mask;
	struct mvumi_hw_regs *regs = mhba->regs;

	/* Allow all doorbell interrupt sources. */
	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
	/* Read-modify-write the endpoint mask to add our sources. */
	mask = ioread32(regs->enpointa_mask_reg);
	mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
	iowrite32(mask, regs->enpointa_mask_reg);
}
  
/**
 * mvumi_disable_intr -Disables interrupt
 * @mhba:		Adapter soft state
 *
 * Masks all doorbell sources and removes the doorbell, outbound-list
 * and communication-error bits from the endpoint mask (mirror of
 * mvumi_enable_intr()).
 */
static void mvumi_disable_intr(struct mvumi_hba *mhba)
{
	unsigned int mask;
	struct mvumi_hw_regs *regs = mhba->regs;

	/* Mask every doorbell interrupt source. */
	iowrite32(0, regs->arm_to_pciea_mask_reg);
	/* Read-modify-write the endpoint mask to drop our sources. */
	mask = ioread32(regs->enpointa_mask_reg);
	mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
							regs->int_comaerr);
	iowrite32(mask, regs->enpointa_mask_reg);
}
  
/*
 * mvumi_clear_intr -	latch and acknowledge interrupt causes
 * @extend:	adapter soft state (struct mvumi_hba *)
 *
 * Reads the main interrupt cause; returns 1 (not ours) when none of the
 * message-unit bits are set or the read returns all-ones (device gone).
 * Otherwise acks the per-source cause registers (write-1-to-clear),
 * stores the summary in mhba->global_isr and any doorbell bits in
 * mhba->isr_status, and returns 0 so the ISR proceeds.
 */
static int mvumi_clear_intr(void *extend)
{
	struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
	unsigned int status, isr_status = 0, tmp = 0;
	struct mvumi_hw_regs *regs = mhba->regs;

	status = ioread32(regs->main_int_cause_reg);
	/* 0xFFFFFFFF: PCIe read to a surprise-removed device. */
	if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
		return 1;
	if (unlikely(status & regs->int_comaerr)) {
		tmp = ioread32(regs->outb_isr_cause);
		/* 9580 only reports outbound errors; 9143 both. */
		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
			if (tmp & regs->clic_out_err) {
				iowrite32(tmp & regs->clic_out_err,
							regs->outb_isr_cause);
			}
		} else {
			if (tmp & (regs->clic_in_err | regs->clic_out_err))
				iowrite32(tmp & (regs->clic_in_err |
						regs->clic_out_err),
						regs->outb_isr_cause);
		}
		/* Drop the error bit from the latched summary. */
		status ^= mhba->regs->int_comaerr;
		/* inbound or outbound parity error, command will timeout */
	}
	if (status & regs->int_comaout) {
		tmp = ioread32(regs->outb_isr_cause);
		if (tmp & regs->clic_irq)
			iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
	}
	if (status & regs->int_dl_cpu2pciea) {
		isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
		/* Ack the doorbell bits we just read. */
		if (isr_status)
			iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
	}

	mhba->global_isr = status;
	mhba->isr_status = isr_status;

	return 0;
}
  
/**
 * mvumi_read_fw_status_reg - returns the current FW status value
 * @mhba:		Adapter soft state
 *
 * Reads the ARM-to-PCIe doorbell register and acknowledges (clears by
 * write-back) any bits that were set, returning the raw value.
 */
static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
{
	unsigned int status;

	status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
	if (status)
		iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
	return status;
}
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
2013
/* Per-chip operations for the MV9143 (see mvumi_cfg_hw_reg for layout). */
static struct mvumi_instance_template mvumi_instance_9143 = {
	.fire_cmd = mvumi_fire_cmd,
	.enable_intr = mvumi_enable_intr,
	.disable_intr = mvumi_disable_intr,
	.clear_intr = mvumi_clear_intr,
	.read_fw_status_reg = mvumi_read_fw_status_reg,
	.check_ib_list = mvumi_check_ib_list_9143,
	.check_ob_list = mvumi_check_ob_list_9143,
	.reset_host = mvumi_reset_host_9143,
};
  
/* Per-chip operations for the MV9580; only the list/reset ops differ
 * from the 9143 table. */
static struct mvumi_instance_template mvumi_instance_9580 = {
	.fire_cmd = mvumi_fire_cmd,
	.enable_intr = mvumi_enable_intr,
	.disable_intr = mvumi_disable_intr,
	.clear_intr = mvumi_clear_intr,
	.read_fw_status_reg = mvumi_read_fw_status_reg,
	.check_ib_list = mvumi_check_ib_list_9580,
	.check_ob_list = mvumi_check_ob_list_9580,
	.reset_host = mvumi_reset_host_9580,
};
  
  static int mvumi_slave_configure(struct scsi_device *sdev)
  {
  	struct mvumi_hba *mhba;
  	unsigned char bitcount = sizeof(unsigned char) * 8;
  
  	mhba = (struct mvumi_hba *) sdev->host->hostdata;
  	if (sdev->id >= mhba->max_target_id)
  		return -EINVAL;
  
  	mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
  	return 0;
  }
  
/**
 * mvumi_build_frame -	Prepares a direct cdb (DCDB) command
 * @mhba:		Adapter soft state
 * @scmd:		SCSI command
 * @cmd:		Command to be prepared in
 *
 * This function prepares CDB commands. These are typcially pass-through
 * commands to the devices.
 *
 * Returns 0 on success. On failure returns (unsigned char)-1 == 0xFF —
 * callers only test for nonzero — after setting CHECK CONDITION with
 * ILLEGAL REQUEST / 0x24 (invalid field in CDB) sense data.
 */
static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
				struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
{
	struct mvumi_msg_frame *pframe;

	cmd->scmd = scmd;
	cmd->cmd_status = REQ_STATUS_PENDING;
	pframe = cmd->frame;
	/* Encode target id in the low byte, LUN in the high byte. */
	pframe->device_id = ((unsigned short) scmd->device->id) |
				(((unsigned short) scmd->device->lun) << 8);
	pframe->cmd_flag = 0;

	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		pframe->cmd_flag |= CMD_FLAG_NON_DATA;
		break;
	case DMA_FROM_DEVICE:
		pframe->cmd_flag |= CMD_FLAG_DATA_IN;
		break;
	case DMA_TO_DEVICE:
		pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
		break;
	case DMA_BIDIRECTIONAL:
	default:
		/* Bidirectional transfers are not supported by this fw. */
		dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
			"cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
		goto error;
	}

	pframe->cdb_length = scmd->cmd_len;
	memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
	pframe->req_function = CL_FUN_SCSI_CMD;
	if (scsi_bufflen(scmd)) {
		if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
			&pframe->sg_counts))
			goto error;

		pframe->data_transfer_length = scsi_bufflen(scmd);
	} else {
		pframe->sg_counts = 0;
		pframe->data_transfer_length = 0;
	}
	return 0;

error:
	scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
		SAM_STAT_CHECK_CONDITION;
	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
									0);
	return -1;
}
  
/**
 * mvumi_queue_command -	Queue entry point
 * @shost:			SCSI host the command was queued against
 * @scmd:			SCSI command to be queued
 *
 * Takes host_lock, grabs a driver command, builds the DCDB frame and
 * fires it. Returns SCSI_MLQUEUE_HOST_BUSY when no command slot is
 * free; frame-build failures complete the command immediately with the
 * sense data set by mvumi_build_frame().
 */
static int mvumi_queue_command(struct Scsi_Host *shost,
					struct scsi_cmnd *scmd)
{
	struct mvumi_cmd *cmd;
	struct mvumi_hba *mhba;
	unsigned long irq_flags;

	spin_lock_irqsave(shost->host_lock, irq_flags);

	mhba = (struct mvumi_hba *) shost->hostdata;
	scmd->result = 0;
	cmd = mvumi_get_cmd(mhba);
	if (unlikely(!cmd)) {
		spin_unlock_irqrestore(shost->host_lock, irq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
		goto out_return_cmd;

	cmd->scmd = scmd;
	/* Stash the driver command for the timeout handler's lookup. */
	scmd->SCp.ptr = (char *) cmd;
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return 0;

out_return_cmd:
	mvumi_return_cmd(mhba, cmd);
	scmd->scsi_done(scmd);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return 0;
}
  
/*
 * mvumi_timed_out -	block-layer timeout handler for an in-flight command
 * @scmd:	timed-out SCSI command
 *
 * Under host_lock: detach the command from the tag table (releasing the
 * tag), remove it from the waiting list — or, if already submitted,
 * drop the fw_outstanding count — unmap its scatterlist, fail it with
 * DID_ABORT and recycle the driver command. Returns BLK_EH_DONE since
 * the command is fully reclaimed here.
 */
static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
{
	struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
	struct Scsi_Host *host = scmd->device->host;
	struct mvumi_hba *mhba = shost_priv(host);
	unsigned long flags;

	spin_lock_irqsave(mhba->shost->host_lock, flags);

	if (mhba->tag_cmd[cmd->frame->tag]) {
		mhba->tag_cmd[cmd->frame->tag] = NULL;
		tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
	}
	/* Non-empty queue_pointer means it never reached the firmware. */
	if (!list_empty(&cmd->queue_pointer))
		list_del_init(&cmd->queue_pointer);
	else
		atomic_dec(&mhba->fw_outstanding);

	scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
	scmd->SCp.ptr = NULL;
	if (scsi_bufflen(scmd)) {
		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
			     scsi_sg_count(scmd),
			     scmd->sc_data_direction);
	}
	mvumi_return_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	return BLK_EH_DONE;
}
  
  static int
  mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
  			sector_t capacity, int geom[])
  {
  	int heads, sectors;
  	sector_t cylinders;
  	unsigned long tmp;
  
  	heads = 64;
  	sectors = 32;
  	tmp = heads * sectors;
  	cylinders = capacity;
  	sector_div(cylinders, tmp);
  
  	if (capacity >= 0x200000) {
  		heads = 255;
  		sectors = 63;
  		tmp = heads * sectors;
  		cylinders = capacity;
  		sector_div(cylinders, tmp);
  	}
  	geom[0] = heads;
  	geom[1] = sectors;
  	geom[2] = cylinders;
  
  	return 0;
  }
  
/* SCSI mid-layer host template; per-adapter limits (can_queue,
 * sg_tablesize, max_id, ...) are filled in by mvumi_io_attach(). */
static struct scsi_host_template mvumi_template = {

	.module = THIS_MODULE,
	.name = "Marvell Storage Controller",
	.slave_configure = mvumi_slave_configure,
	.queuecommand = mvumi_queue_command,
	.eh_timed_out = mvumi_timed_out,
	.eh_host_reset_handler = mvumi_host_reset,
	.bios_param = mvumi_bios_param,
	.dma_boundary = PAGE_SIZE - 1,
	.this_id = -1,
};
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
  static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
  {
  	void *base = NULL;
  	struct mvumi_hw_regs *regs;
  
  	switch (mhba->pdev->device) {
  	case PCI_DEVICE_ID_MARVELL_MV9143:
  		mhba->mmio = mhba->base_addr[0];
  		base = mhba->mmio;
  		if (!mhba->regs) {
  			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
  			if (mhba->regs == NULL)
  				return -ENOMEM;
  		}
  		regs = mhba->regs;
  
  		/* For Arm */
  		regs->ctrl_sts_reg          = base + 0x20104;
  		regs->rstoutn_mask_reg      = base + 0x20108;
  		regs->sys_soft_rst_reg      = base + 0x2010C;
  		regs->main_int_cause_reg    = base + 0x20200;
  		regs->enpointa_mask_reg     = base + 0x2020C;
  		regs->rstoutn_en_reg        = base + 0xF1400;
  		/* For Doorbell */
  		regs->pciea_to_arm_drbl_reg = base + 0x20400;
  		regs->arm_to_pciea_drbl_reg = base + 0x20408;
  		regs->arm_to_pciea_mask_reg = base + 0x2040C;
  		regs->pciea_to_arm_msg0     = base + 0x20430;
  		regs->pciea_to_arm_msg1     = base + 0x20434;
  		regs->arm_to_pciea_msg0     = base + 0x20438;
  		regs->arm_to_pciea_msg1     = base + 0x2043C;
  
  		/* For Message Unit */
  
  		regs->inb_aval_count_basel  = base + 0x508;
  		regs->inb_aval_count_baseh  = base + 0x50C;
  		regs->inb_write_pointer     = base + 0x518;
  		regs->inb_read_pointer      = base + 0x51C;
  		regs->outb_coal_cfg         = base + 0x568;
  		regs->outb_copy_basel       = base + 0x5B0;
  		regs->outb_copy_baseh       = base + 0x5B4;
  		regs->outb_copy_pointer     = base + 0x544;
  		regs->outb_read_pointer     = base + 0x548;
  		regs->outb_isr_cause        = base + 0x560;
  		regs->outb_coal_cfg         = base + 0x568;
  		/* Bit setting for HW */
  		regs->int_comaout           = 1 << 8;
  		regs->int_comaerr           = 1 << 6;
  		regs->int_dl_cpu2pciea      = 1 << 1;
  		regs->cl_pointer_toggle     = 1 << 12;
  		regs->clic_irq              = 1 << 1;
  		regs->clic_in_err           = 1 << 8;
  		regs->clic_out_err          = 1 << 12;
  		regs->cl_slot_num_mask      = 0xFFF;
  		regs->int_drbl_int_mask     = 0x3FFFFFFF;
  		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
  							regs->int_comaerr;
  		break;
  	case PCI_DEVICE_ID_MARVELL_MV9580:
  		mhba->mmio = mhba->base_addr[2];
  		base = mhba->mmio;
  		if (!mhba->regs) {
  			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
  			if (mhba->regs == NULL)
  				return -ENOMEM;
  		}
  		regs = mhba->regs;
  		/* For Arm */
  		regs->ctrl_sts_reg          = base + 0x20104;
  		regs->rstoutn_mask_reg      = base + 0x1010C;
  		regs->sys_soft_rst_reg      = base + 0x10108;
  		regs->main_int_cause_reg    = base + 0x10200;
  		regs->enpointa_mask_reg     = base + 0x1020C;
  		regs->rstoutn_en_reg        = base + 0xF1400;
  
  		/* For Doorbell */
  		regs->pciea_to_arm_drbl_reg = base + 0x10460;
  		regs->arm_to_pciea_drbl_reg = base + 0x10480;
  		regs->arm_to_pciea_mask_reg = base + 0x10484;
  		regs->pciea_to_arm_msg0     = base + 0x10400;
  		regs->pciea_to_arm_msg1     = base + 0x10404;
  		regs->arm_to_pciea_msg0     = base + 0x10420;
  		regs->arm_to_pciea_msg1     = base + 0x10424;
  
  		/* For reset*/
  		regs->reset_request         = base + 0x10108;
  		regs->reset_enable          = base + 0x1010c;
  
  		/* For Message Unit */
  		regs->inb_aval_count_basel  = base + 0x4008;
  		regs->inb_aval_count_baseh  = base + 0x400C;
  		regs->inb_write_pointer     = base + 0x4018;
  		regs->inb_read_pointer      = base + 0x401C;
  		regs->outb_copy_basel       = base + 0x4058;
  		regs->outb_copy_baseh       = base + 0x405C;
  		regs->outb_copy_pointer     = base + 0x406C;
  		regs->outb_read_pointer     = base + 0x4070;
  		regs->outb_coal_cfg         = base + 0x4080;
  		regs->outb_isr_cause        = base + 0x4088;
  		/* Bit setting for HW */
  		regs->int_comaout           = 1 << 4;
  		regs->int_dl_cpu2pciea      = 1 << 12;
  		regs->int_comaerr           = 1 << 29;
  		regs->cl_pointer_toggle     = 1 << 14;
  		regs->cl_slot_num_mask      = 0x3FFF;
  		regs->clic_irq              = 1 << 0;
  		regs->clic_out_err          = 1 << 1;
  		regs->int_drbl_int_mask     = 0x3FFFFFFF;
  		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
  		break;
  	default:
  		return -1;
  		break;
  	}
  
  	return 0;
  }
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
/**
 * mvumi_init_fw -	Initializes the FW
 * @mhba:		Adapter soft state
 *
 * This is the main function for initializing firmware: claims the PCI
 * regions, maps the BARs, selects the per-chip instance template,
 * builds the register map, allocates the DMA handshake page, performs
 * the firmware handshake (mvumi_start) and allocates command slots.
 * Unwinds everything in reverse order on failure.
 */
static int mvumi_init_fw(struct mvumi_hba *mhba)
{
	int ret = 0;

	if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
		dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
		return -EBUSY;
	}
	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
	if (ret)
		goto fail_ioremap;

	/* Bind the per-chip ops table and limits. */
	switch (mhba->pdev->device) {
	case PCI_DEVICE_ID_MARVELL_MV9143:
		mhba->instancet = &mvumi_instance_9143;
		mhba->io_seq = 0;
		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
		mhba->request_id_enabled = 1;
		break;
	case PCI_DEVICE_ID_MARVELL_MV9580:
		mhba->instancet = &mvumi_instance_9580;
		mhba->io_seq = 0;
		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
		break;
	default:
		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
							mhba->pdev->device);
		mhba->instancet = NULL;
		ret = -EINVAL;
		goto fail_alloc_mem;
	}
	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
							mhba->pdev->device);
	ret = mvumi_cfg_hw_reg(mhba);
	if (ret) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for reg\n");
		ret = -ENOMEM;
		goto fail_alloc_mem;
	}
	/* Coherent page shared with the firmware during handshake. */
	mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
			HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
	if (!mhba->handshake_page) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for handshake\n");
		ret = -ENOMEM;
		goto fail_alloc_page;
	}

	if (mvumi_start(mhba)) {
		ret = -EINVAL;
		goto fail_ready_state;
	}
	ret = mvumi_alloc_cmds(mhba);
	if (ret)
		goto fail_ready_state;

	return 0;

fail_ready_state:
	mvumi_release_mem_resource(mhba);
	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
fail_alloc_page:
	kfree(mhba->regs);
fail_alloc_mem:
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
fail_ioremap:
	pci_release_regions(mhba->pdev);

	return ret;
}
  
  /**
   * mvumi_io_attach -	Attaches this driver to SCSI mid-layer
   * @mhba:		Adapter soft state
   */
  static int mvumi_io_attach(struct mvumi_hba *mhba)
  {
  	struct Scsi_Host *host = mhba->shost;
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
2425
  	struct scsi_device *sdev = NULL;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2426
2427
2428
2429
2430
2431
2432
2433
2434
  	int ret;
  	unsigned int max_sg = (mhba->ib_max_size + 4 -
  		sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
  
  	host->irq = mhba->pdev->irq;
  	host->unique_id = mhba->unique_id;
  	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
  	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
  	host->max_sectors = mhba->max_transfer_size / 512;
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
2435
  	host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2436
2437
  	host->max_id = mhba->max_target_id;
  	host->max_cmd_len = MAX_COMMAND_SIZE;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2438
2439
2440
2441
2442
2443
2444
2445
  
  	ret = scsi_add_host(host, &mhba->pdev->dev);
  	if (ret) {
  		dev_err(&mhba->pdev->dev, "scsi_add_host failed
  ");
  		return ret;
  	}
  	mhba->fw_flag |= MVUMI_FW_ATTACH;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2446

bd756ddea   Shun Fu   [SCSI] mvumi: Add...
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
  	mutex_lock(&mhba->sas_discovery_mutex);
  	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
  		ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
  	else
  		ret = 0;
  	if (ret) {
  		dev_err(&mhba->pdev->dev, "add virtual device failed
  ");
  		mutex_unlock(&mhba->sas_discovery_mutex);
  		goto fail_add_device;
  	}
  
  	mhba->dm_thread = kthread_create(mvumi_rescan_bus,
  						mhba, "mvumi_scanthread");
  	if (IS_ERR(mhba->dm_thread)) {
  		dev_err(&mhba->pdev->dev,
  			"failed to create device scan thread
  ");
  		mutex_unlock(&mhba->sas_discovery_mutex);
  		goto fail_create_thread;
  	}
  	atomic_set(&mhba->pnp_count, 1);
  	wake_up_process(mhba->dm_thread);
  
  	mutex_unlock(&mhba->sas_discovery_mutex);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2472
  	return 0;
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
  
  fail_create_thread:
  	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
  		sdev = scsi_device_lookup(mhba->shost, 0,
  						mhba->max_target_id - 1, 0);
  	if (sdev) {
  		scsi_remove_device(sdev);
  		scsi_device_put(sdev);
  	}
  fail_add_device:
  	scsi_remove_host(mhba->shost);
  	return ret;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2485
2486
2487
2488
2489
2490
2491
  }
  
/**
 * mvumi_probe_one -	PCI hotplug entry point
 * @pdev:		PCI device structure
 * @id:			PCI ids of supported hotplugged adapter
 *
 * Bring-up order matters: enable the PCI device, set bus mastering and
 * the DMA mask, allocate the SCSI host, initialize firmware, register
 * the IRQ, enable controller interrupts, and finally attach the host to
 * the SCSI midlayer.  Each failure unwinds in reverse via the goto chain.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;
	int ret;

	dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
			pdev->vendor, pdev->device, pdev->subsystem_vendor,
			pdev->subsystem_device);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* Enables bus mastering and configures the DMA mask for the HBA. */
	ret = mvumi_pci_set_master(pdev);
	if (ret)
		goto fail_set_dma_mask;

	/* Per-adapter state (mvumi_hba) lives in the Scsi_Host private area. */
	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
	if (!host) {
		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
		ret = -ENOMEM;
		goto fail_alloc_instance;
	}
	mhba = shost_priv(host);

	INIT_LIST_HEAD(&mhba->cmd_pool);
	INIT_LIST_HEAD(&mhba->ob_data_list);
	INIT_LIST_HEAD(&mhba->free_ob_list);
	INIT_LIST_HEAD(&mhba->res_list);
	INIT_LIST_HEAD(&mhba->waiting_req_list);
	mutex_init(&mhba->device_lock);
	INIT_LIST_HEAD(&mhba->mhba_dev_list);
	INIT_LIST_HEAD(&mhba->shost_dev_list);
	atomic_set(&mhba->fw_outstanding, 0);
	init_waitqueue_head(&mhba->int_cmd_wait_q);
	mutex_init(&mhba->sas_discovery_mutex);

	mhba->pdev = pdev;
	mhba->shost = host;
	/* Unique per-adapter id derived from PCI bus/devfn topology. */
	mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;

	ret = mvumi_init_fw(mhba);
	if (ret)
		goto fail_init_fw;

	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
				"mvumi", mhba);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IRQ\n");
		goto fail_init_irq;
	}

	/* Interrupts must be live before attaching to the SCSI midlayer. */
	mhba->instancet->enable_intr(mhba);
	pci_set_drvdata(pdev, mhba);

	ret = mvumi_io_attach(mhba);
	if (ret)
		goto fail_io_attach;

	/* Snapshot BAR addresses so a later reset/resume can detect changes. */
	mvumi_backup_bar_addr(mhba);
	dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");

	return 0;

fail_io_attach:
	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
fail_init_irq:
	mvumi_release_fw(mhba);
fail_init_fw:
	scsi_host_put(host);

fail_alloc_instance:
fail_set_dma_mask:
	pci_disable_device(pdev);

	return ret;
}
  
/*
 * mvumi_detach_one - PCI hot-unplug / driver remove entry point
 * @pdev: PCI device being removed
 *
 * Teardown mirrors probe in reverse.  The rescan kthread is stopped
 * first so no new devices appear while the host is being torn down,
 * and controller interrupts are disabled before the IRQ is freed.
 */
static void mvumi_detach_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;

	mhba = pci_get_drvdata(pdev);
	/* Stop the background device-scan thread before removing devices. */
	if (mhba->dm_thread) {
		kthread_stop(mhba->dm_thread);
		mhba->dm_thread = NULL;
	}

	mvumi_detach_devices(mhba);
	host = mhba->shost;
	scsi_remove_host(mhba->shost);
	/* Ask the firmware to flush cached data before we pull the plug. */
	mvumi_flush_cache(mhba);

	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
	mvumi_release_fw(mhba);
	/* Drops the final host reference taken in scsi_host_alloc(). */
	scsi_host_put(host);
	pci_disable_device(pdev);
	dev_dbg(&pdev->dev, "driver is removed!\n");
}
  
  /**
   * mvumi_shutdown -	Shutdown entry point
   * @device:		Generic device structure
   */
  static void mvumi_shutdown(struct pci_dev *pdev)
  {
  	struct mvumi_hba *mhba = pci_get_drvdata(pdev);
  
  	mvumi_flush_cache(mhba);
  }
fddbeb80a   Arnd Bergmann   scsi: mvumi: use ...
2608
/*
 * mvumi_suspend - legacy PCI power-management suspend hook
 * @pdev:  PCI device being suspended
 * @state: target power state chosen by the PM core
 *
 * Flush the firmware cache, quiesce interrupts, release all PCI
 * resources, and put the device into the requested low-power state.
 * Always succeeds.
 */
static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct mvumi_hba *mhba = NULL;

	mhba = pci_get_drvdata(pdev);
	mvumi_flush_cache(mhba);

	pci_set_drvdata(pdev, mhba);
	/* Silence the controller before tearing down the IRQ below. */
	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
fddbeb80a   Arnd Bergmann   scsi: mvumi: use ...
2626
  static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
  {
  	int ret;
  	struct mvumi_hba *mhba = NULL;
  
  	mhba = pci_get_drvdata(pdev);
  
  	pci_set_power_state(pdev, PCI_D0);
  	pci_enable_wake(pdev, PCI_D0, 0);
  	pci_restore_state(pdev);
  
  	ret = pci_enable_device(pdev);
  	if (ret) {
  		dev_err(&pdev->dev, "enable device failed
  ");
  		return ret;
  	}
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
2643
2644
  
  	ret = mvumi_pci_set_master(pdev);
bddbd00cb   Christoph Hellwig   scsi: mvumi: use ...
2645
  	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
ab8e7f4bd   Christoph Hellwig   scsi: mvumi: swit...
2646
2647
  	if (ret)
  		goto fail;
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2648
2649
2650
2651
2652
2653
  	ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
  	if (ret)
  		goto fail;
  	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
  	if (ret)
  		goto release_regions;
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
2654
2655
2656
2657
  	if (mvumi_cfg_hw_reg(mhba)) {
  		ret = -EINVAL;
  		goto unmap_pci_addr;
  	}
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2658
  	mhba->mmio = mhba->base_addr[0];
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
2659
  	mvumi_reset(mhba);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
  
  	if (mvumi_start(mhba)) {
  		ret = -EINVAL;
  		goto unmap_pci_addr;
  	}
  
  	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
  				"mvumi", mhba);
  	if (ret) {
  		dev_err(&pdev->dev, "failed to register IRQ
  ");
  		goto unmap_pci_addr;
  	}
bd756ddea   Shun Fu   [SCSI] mvumi: Add...
2673
  	mhba->instancet->enable_intr(mhba);
f0c568a47   Jianyun Li   [SCSI] mvumi: Add...
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
  
  	return 0;
  
  unmap_pci_addr:
  	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
  release_regions:
  	pci_release_regions(pdev);
  fail:
  	pci_disable_device(pdev);
  
  	return ret;
  }
  
/* PCI driver descriptor: binds mvumi to the IDs in mvumi_pci_table. */
static struct pci_driver mvumi_pci_driver = {

	.name = MV_DRIVER_NAME,
	.id_table = mvumi_pci_table,
	.probe = mvumi_probe_one,
	.remove = mvumi_detach_one,
	.shutdown = mvumi_shutdown,
#ifdef CONFIG_PM
	/* Legacy PM callbacks; only wired up when power management is built. */
	.suspend = mvumi_suspend,
	.resume = mvumi_resume,
#endif
};

/* Generates module init/exit that register/unregister the PCI driver. */
module_pci_driver(mvumi_pci_driver);