Commit 20d3b6475226fbde372b1ce51f26b5379e340759

Authored by Johannes Berg
Committed by John W. Linville
1 parent ac91f91045

iwlwifi: clean up coding style in PCIe transport

Mostly clean up indentation around parentheses
after if, function calls, etc., and also remove
a few unneeded line breaks and some other things.

Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Reviewed-by: Wey-Yi W Guy <wey-yi.w.guy@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>

Showing 4 changed files with 161 additions and 185 deletions
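For context, the dominant pattern in the cleanup below is the kernel's continuation-line style: arguments wrapped after an opening parenthesis are aligned under that parenthesis, trivially short wraps are joined back onto one line, and IWL_TRANS_GET_PCIE_TRANS() lookups are folded into the variable declaration. A minimal before/after sketch (illustrative shapes only, not lines quoted from the diff):

    /* before: continuation line indented by an arbitrary amount */
    void iwl_txq_update_write_ptr(struct iwl_trans *trans,
                struct iwl_tx_queue *txq);

    /* after: continuation aligned under the opening parenthesis */
    void iwl_txq_update_write_ptr(struct iwl_trans *trans,
                                  struct iwl_tx_queue *txq);

    /* after: initialize at declaration instead of a separate wrapped assignment */
    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);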

drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
... ... @@ -313,7 +313,7 @@
313 313 void iwl_irq_tasklet(struct iwl_trans *trans);
314 314 void iwlagn_rx_replenish(struct iwl_trans *trans);
315 315 void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
316   - struct iwl_rx_queue *q);
  316 + struct iwl_rx_queue *q);
317 317  
318 318 /*****************************************************
319 319 * ICT
... ... @@ -328,7 +328,7 @@
328 328 * TX / HCMD
329 329 ******************************************************/
330 330 void iwl_txq_update_write_ptr(struct iwl_trans *trans,
331   - struct iwl_tx_queue *txq);
  331 + struct iwl_tx_queue *txq);
332 332 int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
333 333 struct iwl_tx_queue *txq,
334 334 dma_addr_t addr, u16 len, u8 reset);
... ... @@ -337,8 +337,8 @@
337 337 void iwl_tx_cmd_complete(struct iwl_trans *trans,
338 338 struct iwl_rx_cmd_buffer *rxb, int handler_status);
339 339 void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
340   - struct iwl_tx_queue *txq,
341   - u16 byte_cnt);
  340 + struct iwl_tx_queue *txq,
  341 + u16 byte_cnt);
342 342 void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue);
343 343 void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
344 344 void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
... ... @@ -130,7 +130,7 @@
130 130 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
131 131 */
132 132 void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
133   - struct iwl_rx_queue *q)
  133 + struct iwl_rx_queue *q)
134 134 {
135 135 unsigned long flags;
136 136 u32 reg;
... ... @@ -201,9 +201,7 @@
201 201 */
202 202 static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
203 203 {
204   - struct iwl_trans_pcie *trans_pcie =
205   - IWL_TRANS_GET_PCIE_TRANS(trans);
206   -
  204 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
207 205 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
208 206 struct list_head *element;
209 207 struct iwl_rx_mem_buffer *rxb;
... ... @@ -253,9 +251,7 @@
253 251 */
254 252 static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
255 253 {
256   - struct iwl_trans_pcie *trans_pcie =
257   - IWL_TRANS_GET_PCIE_TRANS(trans);
258   -
  254 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
259 255 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
260 256 struct list_head *element;
261 257 struct iwl_rx_mem_buffer *rxb;
... ... @@ -278,8 +274,7 @@
278 274 gfp_mask |= __GFP_COMP;
279 275  
280 276 /* Alloc a new receive buffer */
281   - page = alloc_pages(gfp_mask,
282   - trans_pcie->rx_page_order);
  277 + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
283 278 if (!page) {
284 279 if (net_ratelimit())
285 280 IWL_DEBUG_INFO(trans, "alloc_pages failed, "
... ... @@ -315,9 +310,10 @@
315 310 BUG_ON(rxb->page);
316 311 rxb->page = page;
317 312 /* Get physical address of the RB */
318   - rxb->page_dma = dma_map_page(trans->dev, page, 0,
319   - PAGE_SIZE << trans_pcie->rx_page_order,
320   - DMA_FROM_DEVICE);
  313 + rxb->page_dma =
  314 + dma_map_page(trans->dev, page, 0,
  315 + PAGE_SIZE << trans_pcie->rx_page_order,
  316 + DMA_FROM_DEVICE);
321 317 /* dma address must be no more than 36 bits */
322 318 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
323 319 /* and also 256 byte aligned! */
... ... @@ -465,8 +461,8 @@
465 461 if (rxb->page != NULL) {
466 462 rxb->page_dma =
467 463 dma_map_page(trans->dev, rxb->page, 0,
468   - PAGE_SIZE << trans_pcie->rx_page_order,
469   - DMA_FROM_DEVICE);
  464 + PAGE_SIZE << trans_pcie->rx_page_order,
  465 + DMA_FROM_DEVICE);
470 466 list_add_tail(&rxb->list, &rxq->rx_free);
471 467 rxq->free_count++;
472 468 } else
... ... @@ -546,12 +542,12 @@
546 542 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
547 543 if (trans->cfg->internal_wimax_coex &&
548 544 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
549   - APMS_CLK_VAL_MRB_FUNC_MODE) ||
  545 + APMS_CLK_VAL_MRB_FUNC_MODE) ||
550 546 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
551   - APMG_PS_CTRL_VAL_RESET_REQ))) {
552   - struct iwl_trans_pcie *trans_pcie;
  547 + APMG_PS_CTRL_VAL_RESET_REQ))) {
  548 + struct iwl_trans_pcie *trans_pcie =
  549 + IWL_TRANS_GET_PCIE_TRANS(trans);
553 550  
554   - trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
555 551 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
556 552 iwl_op_mode_wimax_active(trans->op_mode);
557 553 wake_up(&trans->wait_command_queue);
... ... @@ -567,6 +563,8 @@
567 563 /* tasklet for iwlagn interrupt */
568 564 void iwl_irq_tasklet(struct iwl_trans *trans)
569 565 {
  566 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  567 + struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
570 568 u32 inta = 0;
571 569 u32 handled = 0;
572 570 unsigned long flags;
... ... @@ -575,10 +573,6 @@
575 573 u32 inta_mask;
576 574 #endif
577 575  
578   - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
579   - struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
580   -
581   -
582 576 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
583 577  
584 578 /* Ack/clear/reset pending uCode interrupts.
... ... @@ -593,7 +587,7 @@
593 587 * interrupt coalescing can still be achieved.
594 588 */
595 589 iwl_write32(trans, CSR_INT,
596   - trans_pcie->inta | ~trans_pcie->inta_mask);
  590 + trans_pcie->inta | ~trans_pcie->inta_mask);
597 591  
598 592 inta = trans_pcie->inta;
599 593  
... ... @@ -602,7 +596,7 @@
602 596 /* just for debug */
603 597 inta_mask = iwl_read32(trans, CSR_INT_MASK);
604 598 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
605   - inta, inta_mask);
  599 + inta, inta_mask);
606 600 }
607 601 #endif
608 602  
... ... @@ -651,7 +645,7 @@
651 645  
652 646 hw_rfkill = iwl_is_rfkill_set(trans);
653 647 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
654   - hw_rfkill ? "disable radio" : "enable radio");
  648 + hw_rfkill ? "disable radio" : "enable radio");
655 649  
656 650 isr_stats->rfkill++;
657 651  
... ... @@ -693,7 +687,7 @@
693 687 * Rx "responses" (frame-received notification), and other
694 688 * notifications from uCode come through here*/
695 689 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
696   - CSR_INT_BIT_RX_PERIODIC)) {
  690 + CSR_INT_BIT_RX_PERIODIC)) {
697 691 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
698 692 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
699 693 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
... ... @@ -733,7 +727,7 @@
733 727 */
734 728 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
735 729 iwl_write8(trans, CSR_INT_PERIODIC_REG,
736   - CSR_INT_PERIODIC_ENA);
  730 + CSR_INT_PERIODIC_ENA);
737 731  
738 732 isr_stats->rx++;
739 733 }
... ... @@ -782,8 +776,7 @@
782 776 /* Free dram table */
783 777 void iwl_free_isr_ict(struct iwl_trans *trans)
784 778 {
785   - struct iwl_trans_pcie *trans_pcie =
786   - IWL_TRANS_GET_PCIE_TRANS(trans);
  779 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
787 780  
788 781 if (trans_pcie->ict_tbl) {
789 782 dma_free_coherent(trans->dev, ICT_SIZE,
... ... @@ -802,8 +795,7 @@
802 795 */
803 796 int iwl_alloc_isr_ict(struct iwl_trans *trans)
804 797 {
805   - struct iwl_trans_pcie *trans_pcie =
806   - IWL_TRANS_GET_PCIE_TRANS(trans);
  798 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
807 799  
808 800 trans_pcie->ict_tbl =
809 801 dma_alloc_coherent(trans->dev, ICT_SIZE,
... ... @@ -837,10 +829,9 @@
837 829 */
838 830 void iwl_reset_ict(struct iwl_trans *trans)
839 831 {
  832 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
840 833 u32 val;
841 834 unsigned long flags;
842   - struct iwl_trans_pcie *trans_pcie =
843   - IWL_TRANS_GET_PCIE_TRANS(trans);
844 835  
845 836 if (!trans_pcie->ict_tbl)
846 837 return;
... ... @@ -868,9 +859,7 @@
868 859 /* Device is going down disable ict interrupt usage */
869 860 void iwl_disable_ict(struct iwl_trans *trans)
870 861 {
871   - struct iwl_trans_pcie *trans_pcie =
872   - IWL_TRANS_GET_PCIE_TRANS(trans);
873   -
  862 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
874 863 unsigned long flags;
875 864  
876 865 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
... ... @@ -934,7 +923,7 @@
934 923 if (likely(inta))
935 924 tasklet_schedule(&trans_pcie->irq_tasklet);
936 925 else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
937   - !trans_pcie->inta)
  926 + !trans_pcie->inta)
938 927 iwl_enable_interrupts(trans);
939 928  
940 929 unplugged:
... ... @@ -945,7 +934,7 @@
945 934 /* re-enable interrupts here since we don't have anything to service. */
946 935 /* only Re-enable if disabled by irq and no schedules tasklet. */
947 936 if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
948   - !trans_pcie->inta)
  937 + !trans_pcie->inta)
949 938 iwl_enable_interrupts(trans);
950 939  
951 940 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
... ... @@ -1036,7 +1025,7 @@
1036 1025  
1037 1026 inta = (0xff & val) | ((0xff00 & val) << 16);
1038 1027 IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
1039   - inta, inta_mask, val);
  1028 + inta, inta_mask, val);
1040 1029  
1041 1030 inta &= trans_pcie->inta_mask;
1042 1031 trans_pcie->inta |= inta;
drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
... ... @@ -47,12 +47,11 @@
47 47 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
48 48 */
49 49 void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
50   - struct iwl_tx_queue *txq,
51   - u16 byte_cnt)
  50 + struct iwl_tx_queue *txq,
  51 + u16 byte_cnt)
52 52 {
53 53 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
54   - struct iwl_trans_pcie *trans_pcie =
55   - IWL_TRANS_GET_PCIE_TRANS(trans);
  54 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
56 55 int write_ptr = txq->q.write_ptr;
57 56 int txq_id = txq->q.id;
58 57 u8 sec_ctl = 0;
... ... @@ -270,7 +269,7 @@
270 269 /* Each TFD can point to a maximum 20 Tx buffers */
271 270 if (num_tbs >= IWL_NUM_OF_TBS) {
272 271 IWL_ERR(trans, "Error can not send more than %d chunks\n",
273   - IWL_NUM_OF_TBS);
  272 + IWL_NUM_OF_TBS);
274 273 return -EINVAL;
275 274 }
276 275  
... ... @@ -279,7 +278,7 @@
279 278  
280 279 if (unlikely(addr & ~IWL_TX_DMA_MASK))
281 280 IWL_ERR(trans, "Unaligned address = %llx\n",
282   - (unsigned long long)addr);
  281 + (unsigned long long)addr);
283 282  
284 283 iwl_tfd_set_tb(tfd, num_tbs, addr, len);
285 284  
... ... @@ -383,15 +382,13 @@
383 382 }
384 383  
385 384 static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
386   - u16 txq_id)
  385 + u16 txq_id)
387 386 {
  387 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
388 388 u32 tbl_dw_addr;
389 389 u32 tbl_dw;
390 390 u16 scd_q2ratid;
391 391  
392   - struct iwl_trans_pcie *trans_pcie =
393   - IWL_TRANS_GET_PCIE_TRANS(trans);
394   -
395 392 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
396 393  
397 394 tbl_dw_addr = trans_pcie->scd_base_addr +
... ... @@ -419,12 +416,11 @@
419 416 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
420 417 }
421 418  
422   -void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
423   - int txq_id, u32 index)
  419 +void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index)
424 420 {
425 421 IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
426 422 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
427   - (index & 0xff) | (txq_id << 8));
  423 + (index & 0xff) | (txq_id << 8));
428 424 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
429 425 }
430 426  
... ... @@ -615,13 +611,13 @@
615 611 }
616 612  
617 613 IWL_DEBUG_HC(trans,
618   - "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
619   - trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
620   - out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
621   - q->write_ptr, idx, trans_pcie->cmd_queue);
  614 + "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
  615 + trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
  616 + out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
  617 + cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
622 618  
623 619 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
624   - DMA_BIDIRECTIONAL);
  620 + DMA_BIDIRECTIONAL);
625 621 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
626 622 idx = -ENOMEM;
627 623 goto out;
... ... @@ -630,8 +626,7 @@
630 626 dma_unmap_addr_set(out_meta, mapping, phys_addr);
631 627 dma_unmap_len_set(out_meta, len, copy_size);
632 628  
633   - iwlagn_txq_attach_buf_to_tfd(trans, txq,
634   - phys_addr, copy_size, 1);
  629 + iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
635 630 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
636 631 trace_bufs[0] = &out_cmd->hdr;
637 632 trace_lens[0] = copy_size;
... ... @@ -643,8 +638,7 @@
643 638 continue;
644 639 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
645 640 continue;
646   - phys_addr = dma_map_single(trans->dev,
647   - (void *)cmd->data[i],
  641 + phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
648 642 cmd->len[i], DMA_BIDIRECTIONAL);
649 643 if (dma_mapping_error(trans->dev, phys_addr)) {
650 644 iwl_unmap_tfd(trans, out_meta,
... ... @@ -723,9 +717,10 @@
723 717 lockdep_assert_held(&txq->lock);
724 718  
725 719 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
726   - IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
727   - "index %d is out of range [0-%d] %d %d.\n", __func__,
728   - txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
  720 + IWL_ERR(trans,
  721 + "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
  722 + __func__, txq_id, idx, q->n_bd,
  723 + q->write_ptr, q->read_ptr);
729 724 return;
730 725 }
731 726  
... ... @@ -733,8 +728,8 @@
733 728 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
734 729  
735 730 if (nfreed++ > 0) {
736   - IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
737   - q->write_ptr, q->read_ptr);
  731 + IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
  732 + idx, q->write_ptr, q->read_ptr);
738 733 iwl_op_mode_nic_error(trans->op_mode);
739 734 }
740 735  
... ... @@ -771,9 +766,9 @@
771 766 * in the queue management code. */
772 767 if (WARN(txq_id != trans_pcie->cmd_queue,
773 768 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
774   - txq_id, trans_pcie->cmd_queue, sequence,
775   - trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
776   - trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
  769 + txq_id, trans_pcie->cmd_queue, sequence,
  770 + trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
  771 + trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
777 772 iwl_print_hex_error(trans, pkt, 32);
778 773 return;
779 774 }
... ... @@ -869,8 +864,9 @@
869 864 }
870 865  
871 866 ret = wait_event_timeout(trans->wait_command_queue,
872   - !test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status),
873   - HOST_COMPLETE_TIMEOUT);
  867 + !test_bit(STATUS_HCMD_ACTIVE,
  868 + &trans_pcie->status),
  869 + HOST_COMPLETE_TIMEOUT);
874 870 if (!ret) {
875 871 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
876 872 struct iwl_tx_queue *txq =
... ... @@ -955,10 +951,10 @@
955 951  
956 952 if ((index >= q->n_bd) ||
957 953 (iwl_queue_used(q, last_to_free) == 0)) {
958   - IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
959   - "last_to_free %d is out of range [0-%d] %d %d.\n",
960   - __func__, txq_id, last_to_free, q->n_bd,
961   - q->write_ptr, q->read_ptr);
  954 + IWL_ERR(trans,
  955 + "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
  956 + __func__, txq_id, last_to_free, q->n_bd,
  957 + q->write_ptr, q->read_ptr);
962 958 return 0;
963 959 }
964 960  
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
... ... @@ -84,8 +84,7 @@
84 84  
85 85 static int iwl_trans_rx_alloc(struct iwl_trans *trans)
86 86 {
87   - struct iwl_trans_pcie *trans_pcie =
88   - IWL_TRANS_GET_PCIE_TRANS(trans);
  87 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
89 88 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
90 89 struct device *dev = trans->dev;
91 90  
... ... @@ -112,7 +111,7 @@
112 111  
113 112 err_rb_stts:
114 113 dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
115   - rxq->bd, rxq->bd_dma);
  114 + rxq->bd, rxq->bd_dma);
116 115 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
117 116 rxq->bd = NULL;
118 117 err_bd:
... ... @@ -121,8 +120,7 @@
121 120  
122 121 static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
123 122 {
124   - struct iwl_trans_pcie *trans_pcie =
125   - IWL_TRANS_GET_PCIE_TRANS(trans);
  123 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
126 124 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
127 125 int i;
128 126  
... ... @@ -132,8 +130,8 @@
132 130 * to an SKB, so we need to unmap and free potential storage */
133 131 if (rxq->pool[i].page != NULL) {
134 132 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
135   - PAGE_SIZE << trans_pcie->rx_page_order,
136   - DMA_FROM_DEVICE);
  133 + PAGE_SIZE << trans_pcie->rx_page_order,
  134 + DMA_FROM_DEVICE);
137 135 __free_pages(rxq->pool[i].page,
138 136 trans_pcie->rx_page_order);
139 137 rxq->pool[i].page = NULL;
... ... @@ -191,8 +189,7 @@
191 189  
192 190 static int iwl_rx_init(struct iwl_trans *trans)
193 191 {
194   - struct iwl_trans_pcie *trans_pcie =
195   - IWL_TRANS_GET_PCIE_TRANS(trans);
  192 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
196 193 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
197 194  
198 195 int i, err;
... ... @@ -234,10 +231,8 @@
234 231  
235 232 static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
236 233 {
237   - struct iwl_trans_pcie *trans_pcie =
238   - IWL_TRANS_GET_PCIE_TRANS(trans);
  234 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
239 235 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
240   -
241 236 unsigned long flags;
242 237  
243 238 /*if rxq->bd is NULL, it means that nothing has been allocated,
... ... @@ -272,11 +267,11 @@
272 267 /* stop Rx DMA */
273 268 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
274 269 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
275   - FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
  270 + FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
276 271 }
277 272  
278   -static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
279   - struct iwl_dma_ptr *ptr, size_t size)
  273 +static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
  274 + struct iwl_dma_ptr *ptr, size_t size)
280 275 {
281 276 if (WARN_ON(ptr->addr))
282 277 return -EINVAL;
... ... @@ -289,8 +284,8 @@
289 284 return 0;
290 285 }
291 286  
292   -static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
293   - struct iwl_dma_ptr *ptr)
  287 +static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
  288 + struct iwl_dma_ptr *ptr)
294 289 {
295 290 if (unlikely(!ptr->addr))
296 291 return;
... ... @@ -327,12 +322,12 @@
327 322 }
328 323  
329 324 static int iwl_trans_txq_alloc(struct iwl_trans *trans,
330   - struct iwl_tx_queue *txq, int slots_num,
331   - u32 txq_id)
  325 + struct iwl_tx_queue *txq, int slots_num,
  326 + u32 txq_id)
332 327 {
  328 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
333 329 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
334 330 int i;
335   - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
336 331  
337 332 if (WARN_ON(txq->entries || txq->tfds))
338 333 return -EINVAL;
... ... @@ -453,6 +448,7 @@
453 448 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
454 449 struct device *dev = trans->dev;
455 450 int i;
  451 +
456 452 if (WARN_ON(!txq))
457 453 return;
458 454  
... ... @@ -572,11 +568,11 @@
572 568 }
573 569 static int iwl_tx_init(struct iwl_trans *trans)
574 570 {
  571 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
575 572 int ret;
576 573 int txq_id, slots_num;
577 574 unsigned long flags;
578 575 bool alloc = false;
579   - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
580 576  
581 577 if (!trans_pcie->txq) {
582 578 ret = iwl_trans_tx_alloc(trans);
... ... @@ -641,10 +637,9 @@
641 637  
642 638 static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
643 639 {
  640 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
644 641 int pos;
645 642 u16 pci_lnk_ctl;
646   - struct iwl_trans_pcie *trans_pcie =
647   - IWL_TRANS_GET_PCIE_TRANS(trans);
648 643  
649 644 struct pci_dev *pci_dev = trans_pcie->pci_dev;
650 645  
... ... @@ -698,14 +693,14 @@
698 693  
699 694 /* Disable L0S exit timer (platform NMI Work/Around) */
700 695 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
701   - CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
  696 + CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
702 697  
703 698 /*
704 699 * Disable L0s without affecting L1;
705 700 * don't wait for ICH L0s (ICH bug W/A)
706 701 */
707 702 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
708   - CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
  703 + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
709 704  
710 705 /* Set FH wait threshold to maximum (HW error during stress W/A) */
711 706 iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
... ... @@ -715,7 +710,7 @@
715 710 * wake device's PCI Express link L1a -> L0s
716 711 */
717 712 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
718   - CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
  713 + CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
719 714  
720 715 iwl_apm_config(trans);
721 716  
... ... @@ -736,8 +731,8 @@
736 731 * and accesses to uCode SRAM.
737 732 */
738 733 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
739   - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
740   - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
  734 + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
  735 + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
741 736 if (ret < 0) {
742 737 IWL_DEBUG_INFO(trans, "Failed to init the card\n");
743 738 goto out;
... ... @@ -771,8 +766,8 @@
771 766 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
772 767  
773 768 ret = iwl_poll_bit(trans, CSR_RESET,
774   - CSR_RESET_REG_FLAG_MASTER_DISABLED,
775   - CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
  769 + CSR_RESET_REG_FLAG_MASTER_DISABLED,
  770 + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
776 771 if (ret)
777 772 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
778 773  
... ... @@ -814,8 +809,7 @@
814 809 iwl_apm_init(trans);
815 810  
816 811 /* Set interrupt coalescing calibration timer to default (512 usecs) */
817   - iwl_write8(trans, CSR_INT_COALESCING,
818   - IWL_HOST_INT_CALIB_TIMEOUT_DEF);
  812 + iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
819 813  
820 814 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
821 815  
... ... @@ -834,8 +828,7 @@
834 828  
835 829 if (trans->cfg->base_params->shadow_reg_enable) {
836 830 /* enable shadow regs in HW */
837   - iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
838   - 0x800FFFFF);
  831 + iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
839 832 }
840 833  
841 834 return 0;
... ... @@ -849,13 +842,13 @@
849 842 int ret;
850 843  
851 844 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
852   - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
  845 + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
853 846  
854 847 /* See if we got it */
855 848 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
856   - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
857   - CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
858   - HW_READY_TIMEOUT);
  849 + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
  850 + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
  851 + HW_READY_TIMEOUT);
859 852  
860 853 IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
861 854 return ret;
... ... @@ -875,11 +868,11 @@
875 868  
876 869 /* If HW is not ready, prepare the conditions to check again */
877 870 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
878   - CSR_HW_IF_CONFIG_REG_PREPARE);
  871 + CSR_HW_IF_CONFIG_REG_PREPARE);
879 872  
880 873 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
881   - ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
882   - CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
  874 + ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
  875 + CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
883 876  
884 877 if (ret < 0)
885 878 return ret;
... ... @@ -906,32 +899,33 @@
906 899 trans_pcie->ucode_write_complete = false;
907 900  
908 901 iwl_write_direct32(trans,
909   - FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
910   - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
  902 + FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
  903 + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
911 904  
912 905 iwl_write_direct32(trans,
913   - FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
  906 + FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
  907 + dst_addr);
914 908  
915 909 iwl_write_direct32(trans,
916 910 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
917 911 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
918 912  
919 913 iwl_write_direct32(trans,
920   - FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
921   - (iwl_get_dma_hi_addr(phy_addr)
922   - << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
  914 + FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
  915 + (iwl_get_dma_hi_addr(phy_addr)
  916 + << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
923 917  
924 918 iwl_write_direct32(trans,
925   - FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
926   - 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
927   - 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
928   - FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
  919 + FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
  920 + 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
  921 + 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
  922 + FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
929 923  
930 924 iwl_write_direct32(trans,
931   - FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
932   - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
933   - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
934   - FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
  925 + FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
  926 + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
  927 + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
  928 + FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
935 929  
936 930 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
937 931 section_num);
... ... @@ -1068,7 +1062,7 @@
1068 1062 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1069 1063  
1070 1064 iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
1071   - SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
  1065 + SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
1072 1066 iwl_write_prph(trans, SCD_AGGR_SEL, 0);
1073 1067  
1074 1068 /* initiate the queues */
... ... @@ -1089,7 +1083,7 @@
1089 1083 }
1090 1084  
1091 1085 iwl_write_prph(trans, SCD_INTERRUPT_MASK,
1092   - IWL_MASK(0, trans->cfg->base_params->num_of_queues));
  1086 + IWL_MASK(0, trans->cfg->base_params->num_of_queues));
1093 1087  
1094 1088 /* Activate all Tx DMA/FIFO channels */
1095 1089 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
... ... @@ -1113,7 +1107,7 @@
1113 1107  
1114 1108 /* Enable L1-Active */
1115 1109 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
1116   - APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
  1110 + APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1117 1111 }
1118 1112  
1119 1113 static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
... ... @@ -1127,9 +1121,9 @@
1127 1121 */
1128 1122 static int iwl_trans_tx_stop(struct iwl_trans *trans)
1129 1123 {
  1124 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1130 1125 int ch, txq_id, ret;
1131 1126 unsigned long flags;
1132   - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1133 1127  
1134 1128 /* Turn off all Tx DMA fifos */
1135 1129 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
... ... @@ -1141,13 +1135,13 @@
1141 1135 iwl_write_direct32(trans,
1142 1136 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
1143 1137 ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
1144   - FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
1145   - 1000);
  1138 + FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
1146 1139 if (ret < 0)
1147   - IWL_ERR(trans, "Failing on timeout while stopping"
1148   - " DMA channel %d [0x%08x]", ch,
1149   - iwl_read_direct32(trans,
1150   - FH_TSSR_TX_STATUS_REG));
  1140 + IWL_ERR(trans,
  1141 + "Failing on timeout while stopping DMA channel %d [0x%08x]",
  1142 + ch,
  1143 + iwl_read_direct32(trans,
  1144 + FH_TSSR_TX_STATUS_REG));
1151 1145 }
1152 1146 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1153 1147  
... ... @@ -1166,8 +1160,8 @@
1166 1160  
1167 1161 static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1168 1162 {
1169   - unsigned long flags;
1170 1163 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  1164 + unsigned long flags;
1171 1165  
1172 1166 /* tell the device to stop sending interrupts */
1173 1167 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
... ... @@ -1197,7 +1191,7 @@
1197 1191  
1198 1192 /* Make sure (redundant) we've released our request to stay awake */
1199 1193 iwl_clear_bit(trans, CSR_GP_CNTRL,
1200   - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
  1194 + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1201 1195  
1202 1196 /* Stop the device, and put it in low power state */
1203 1197 iwl_apm_stop(trans);
... ... @@ -1271,8 +1265,9 @@
1271 1265 txq->entries[q->write_ptr].cmd = dev_cmd;
1272 1266  
1273 1267 dev_cmd->hdr.cmd = REPLY_TX;
1274   - dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1275   - INDEX_TO_SEQ(q->write_ptr)));
  1268 + dev_cmd->hdr.sequence =
  1269 + cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
  1270 + INDEX_TO_SEQ(q->write_ptr)));
1276 1271  
1277 1272 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1278 1273 out_meta = &txq->entries[q->write_ptr].meta;
... ... @@ -1337,7 +1332,7 @@
1337 1332  
1338 1333 /* take back ownership of DMA buffer to enable update */
1339 1334 dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
1340   - DMA_BIDIRECTIONAL);
  1335 + DMA_BIDIRECTIONAL);
1341 1336 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1342 1337 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1343 1338  
... ... @@ -1349,7 +1344,7 @@
1349 1344 iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
1350 1345  
1351 1346 dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
1352   - DMA_BIDIRECTIONAL);
  1347 + DMA_BIDIRECTIONAL);
1353 1348  
1354 1349 trace_iwlwifi_dev_tx(trans->dev,
1355 1350 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
... ... @@ -1388,8 +1383,7 @@
1388 1383  
1389 1384 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1390 1385 {
1391   - struct iwl_trans_pcie *trans_pcie =
1392   - IWL_TRANS_GET_PCIE_TRANS(trans);
  1386 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1393 1387 int err;
1394 1388 bool hw_rfkill;
1395 1389  
... ... @@ -1402,7 +1396,7 @@
1402 1396 iwl_alloc_isr_ict(trans);
1403 1397  
1404 1398 err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
1405   - DRV_NAME, trans);
  1399 + DRV_NAME, trans);
1406 1400 if (err) {
1407 1401 IWL_ERR(trans, "Error allocating IRQ %d\n",
1408 1402 trans_pcie->irq);
... ... @@ -1440,9 +1434,9 @@
1440 1434 static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
1441 1435 bool op_mode_leaving)
1442 1436 {
  1437 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1443 1438 bool hw_rfkill;
1444 1439 unsigned long flags;
1445   - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1446 1440  
1447 1441 iwl_apm_stop(trans);
1448 1442  
... ... @@ -1546,8 +1540,7 @@
1546 1540  
1547 1541 void iwl_trans_pcie_free(struct iwl_trans *trans)
1548 1542 {
1549   - struct iwl_trans_pcie *trans_pcie =
1550   - IWL_TRANS_GET_PCIE_TRANS(trans);
  1543 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1551 1544  
1552 1545 iwl_trans_pcie_tx_free(trans);
1553 1546 #ifndef CONFIG_IWLWIFI_IDI
... ... @@ -1809,8 +1802,8 @@
1809 1802 };
1810 1803  
1811 1804 static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1812   - char __user *user_buf,
1813   - size_t count, loff_t *ppos)
  1805 + char __user *user_buf,
  1806 + size_t count, loff_t *ppos)
1814 1807 {
1815 1808 struct iwl_trans *trans = file->private_data;
1816 1809 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
... ... @@ -1846,11 +1839,11 @@
1846 1839 }
1847 1840  
1848 1841 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1849   - char __user *user_buf,
1850   - size_t count, loff_t *ppos) {
  1842 + char __user *user_buf,
  1843 + size_t count, loff_t *ppos)
  1844 +{
1851 1845 struct iwl_trans *trans = file->private_data;
1852   - struct iwl_trans_pcie *trans_pcie =
1853   - IWL_TRANS_GET_PCIE_TRANS(trans);
  1846 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1854 1847 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
1855 1848 char buf[256];
1856 1849 int pos = 0;
... ... @@ -1874,11 +1867,10 @@
1874 1867  
1875 1868 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
1876 1869 char __user *user_buf,
1877   - size_t count, loff_t *ppos) {
1878   -
  1870 + size_t count, loff_t *ppos)
  1871 +{
1879 1872 struct iwl_trans *trans = file->private_data;
1880   - struct iwl_trans_pcie *trans_pcie =
1881   - IWL_TRANS_GET_PCIE_TRANS(trans);
  1873 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1882 1874 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1883 1875  
1884 1876 int pos = 0;
... ... @@ -1936,8 +1928,7 @@
1936 1928 size_t count, loff_t *ppos)
1937 1929 {
1938 1930 struct iwl_trans *trans = file->private_data;
1939   - struct iwl_trans_pcie *trans_pcie =
1940   - IWL_TRANS_GET_PCIE_TRANS(trans);
  1931 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1941 1932 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1942 1933  
1943 1934 char buf[8];
... ... @@ -1957,8 +1948,8 @@
1957 1948 }
1958 1949  
1959 1950 static ssize_t iwl_dbgfs_csr_write(struct file *file,
1960   - const char __user *user_buf,
1961   - size_t count, loff_t *ppos)
  1951 + const char __user *user_buf,
  1952 + size_t count, loff_t *ppos)
1962 1953 {
1963 1954 struct iwl_trans *trans = file->private_data;
1964 1955 char buf[8];
... ... @@ -1978,8 +1969,8 @@
1978 1969 }
1979 1970  
1980 1971 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1981   - char __user *user_buf,
1982   - size_t count, loff_t *ppos)
  1972 + char __user *user_buf,
  1973 + size_t count, loff_t *ppos)
1983 1974 {
1984 1975 struct iwl_trans *trans = file->private_data;
1985 1976 char *buf;
... ... @@ -2022,7 +2013,7 @@
2022 2013 *
2023 2014 */
2024 2015 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2025   - struct dentry *dir)
  2016 + struct dentry *dir)
2026 2017 {
2027 2018 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
2028 2019 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
... ... @@ -2034,9 +2025,10 @@
2034 2025 }
2035 2026 #else
2036 2027 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2037   - struct dentry *dir)
2038   -{ return 0; }
2039   -
  2028 + struct dentry *dir)
  2029 +{
  2030 + return 0;
  2031 +}
2040 2032 #endif /*CONFIG_IWLWIFI_DEBUGFS */
2041 2033  
2042 2034 static const struct iwl_trans_ops trans_ops_pcie = {
... ... @@ -2081,7 +2073,7 @@
2081 2073 int err;
2082 2074  
2083 2075 trans = kzalloc(sizeof(struct iwl_trans) +
2084   - sizeof(struct iwl_trans_pcie), GFP_KERNEL);
  2076 + sizeof(struct iwl_trans_pcie), GFP_KERNEL);
2085 2077  
2086 2078 if (WARN_ON(!trans))
2087 2079 return NULL;
... ... @@ -2097,7 +2089,7 @@
2097 2089 /* W/A - seems to solve weird behavior. We need to remove this if we
2098 2090 * don't want to stay in L1 all the time. This wastes a lot of power */
2099 2091 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
2100   - PCIE_LINK_STATE_CLKPM);
  2092 + PCIE_LINK_STATE_CLKPM);
2101 2093  
2102 2094 if (pci_enable_device(pdev)) {
2103 2095 err = -ENODEV;
... ... @@ -2113,7 +2105,7 @@
2113 2105 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2114 2106 if (!err)
2115 2107 err = pci_set_consistent_dma_mask(pdev,
2116   - DMA_BIT_MASK(32));
  2108 + DMA_BIT_MASK(32));
2117 2109 /* both attempts failed: */
2118 2110 if (err) {
2119 2111 dev_printk(KERN_ERR, &pdev->dev,
... ... @@ -2136,13 +2128,13 @@
2136 2128 }
2137 2129  
2138 2130 dev_printk(KERN_INFO, &pdev->dev,
2139   - "pci_resource_len = 0x%08llx\n",
2140   - (unsigned long long) pci_resource_len(pdev, 0));
  2131 + "pci_resource_len = 0x%08llx\n",
  2132 + (unsigned long long) pci_resource_len(pdev, 0));
2141 2133 dev_printk(KERN_INFO, &pdev->dev,
2142   - "pci_resource_base = %p\n", trans_pcie->hw_base);
  2134 + "pci_resource_base = %p\n", trans_pcie->hw_base);
2143 2135  
2144 2136 dev_printk(KERN_INFO, &pdev->dev,
2145   - "HW Revision ID = 0x%X\n", pdev->revision);
  2137 + "HW Revision ID = 0x%X\n", pdev->revision);
2146 2138  
2147 2139 /* We disable the RETRY_TIMEOUT register (0x41) to keep
2148 2140 * PCI Tx retries from interfering with C3 CPU state */
... ... @@ -2151,7 +2143,7 @@
2151 2143 err = pci_enable_msi(pdev);
2152 2144 if (err)
2153 2145 dev_printk(KERN_ERR, &pdev->dev,
2154   - "pci_enable_msi failed(0X%x)", err);
  2146 + "pci_enable_msi failed(0X%x)", err);
2155 2147  
2156 2148 trans->dev = &pdev->dev;
2157 2149 trans_pcie->irq = pdev->irq;