Commit 34a991587a5cc9f78960c2c9beea217866458c41

Authored by Dan Williams
1 parent 89a7301f21

isci: kill 'get/set' macros

Most of these simple dereference macros are longer than their open-coded
equivalents.  Deleting enum sci_controller_mode is thrown in for good
measure.

Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Showing 15 changed files with 188 additions and 478 deletions
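Every hunk below applies the same substitution: a one-line accessor macro is deleted and its dereference is written out at the call site. A minimal, self-contained sketch of the pattern, using simplified stand-ins for the driver structures (the real struct isci_port and struct isci_host carry far more state than shown here):

    #include <stdio.h>

    /* Simplified stand-ins for the driver objects, for illustration only. */
    struct isci_host { int id; };
    struct isci_port { struct isci_host *owning_controller; };

    /* Before: the wrapper macro is longer than the expression it hides. */
    #define sci_port_get_controller(this_port) \
            ((this_port)->owning_controller)

    int main(void)
    {
            struct isci_host ihost = { .id = 0 };
            struct isci_port iport = { .owning_controller = &ihost };

            /* Old style: accessor macro. */
            struct isci_host *a = sci_port_get_controller(&iport);

            /* New style after this commit: open-coded dereference. */
            struct isci_host *b = iport.owning_controller;

            printf("same controller: %d\n", a == b);
            return 0;
    }

The multi-line helpers get the same treatment in the hunks that follow: sci_request_set_status() becomes two open-coded assignments to ireq->scu_status and ireq->sci_status, and sci_remote_device_build_command_context() is folded directly into sci_remote_device_post_request().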

drivers/scsi/isci/host.c
... ... @@ -2627,7 +2627,7 @@
2627 2627 return status;
2628 2628  
2629 2629 set_bit(IREQ_ACTIVE, &ireq->flags);
2630   - sci_controller_post_request(ihost, sci_request_get_post_context(ireq));
  2630 + sci_controller_post_request(ihost, ireq->post_context);
2631 2631 return SCI_SUCCESS;
2632 2632 }
2633 2633  
... ... @@ -2707,7 +2707,7 @@
2707 2707 }
2708 2708  
2709 2709 set_bit(IREQ_ACTIVE, &ireq->flags);
2710   - sci_controller_post_request(ihost, sci_request_get_post_context(ireq));
  2710 + sci_controller_post_request(ihost, ireq->post_context);
2711 2711 return SCI_SUCCESS;
2712 2712 }
2713 2713  
... ... @@ -2747,9 +2747,7 @@
2747 2747 return SCI_SUCCESS;
2748 2748 case SCI_SUCCESS:
2749 2749 set_bit(IREQ_ACTIVE, &ireq->flags);
2750   -
2751   - sci_controller_post_request(ihost,
2752   - sci_request_get_post_context(ireq));
  2750 + sci_controller_post_request(ihost, ireq->post_context);
2753 2751 break;
2754 2752 default:
2755 2753 break;
drivers/scsi/isci/host.h
... ... @@ -172,6 +172,7 @@
172 172 /* XXX kill */
173 173 bool phy_startup_timer_pending;
174 174 u32 next_phy_to_start;
  175 + /* XXX convert to unsigned long and use bitops */
175 176 u8 invalid_phy_mask;
176 177  
177 178 /* TODO attempt dynamic interrupt coalescing scheme */
... ... @@ -359,13 +360,8 @@
359 360 return dev->port->ha->lldd_ha;
360 361 }
361 362  
362   -/**
363   - * sci_controller_get_protocol_engine_group() -
364   - *
365   - * This macro returns the protocol engine group for this controller object.
366   - * Presently we only support protocol engine group 0 so just return that
367   - */
368   -#define sci_controller_get_protocol_engine_group(controller) 0
  363 +/* we always use protocol engine group zero */
  364 +#define ISCI_PEG 0
369 365  
370 366 /* see sci_controller_io_tag_allocate|free for how seq and tci are built */
371 367 #define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
... ... @@ -384,16 +380,6 @@
384 380 return SCU_STP_REMOTE_NODE_COUNT;
385 381 return SCU_SSP_REMOTE_NODE_COUNT;
386 382 }
387   -
388   -/**
389   - * sci_controller_set_invalid_phy() -
390   - *
391   - * This macro will set the bit in the invalid phy mask for this controller
392   - * object. This is used to control messages reported for invalid link up
393   - * notifications.
394   - */
395   -#define sci_controller_set_invalid_phy(controller, phy) \
396   - ((controller)->invalid_phy_mask |= (1 << (phy)->phy_index))
397 383  
398 384 /**
399 385 * sci_controller_clear_invalid_phy() -
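The new comment on invalid_phy_mask in the hunk above marks it for a later conversion to unsigned long plus the generic bitops. That conversion is not part of this commit; a hypothetical sketch of what it might look like (struct and helper names invented for illustration):

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* Hypothetical follow-up only: invalid_phy_mask widened from u8 to
     * unsigned long so set_bit()/test_bit() can replace the open-coded
     * shifts and masks used in port.c.
     */
    struct example_host {
            unsigned long invalid_phy_mask;
    };

    static inline void example_set_invalid_phy(struct example_host *h,
                                               unsigned int phy_index)
    {
            set_bit(phy_index, &h->invalid_phy_mask);  /* was: |= (1 << phy_index) */
    }

    static inline bool example_phy_is_invalid(struct example_host *h,
                                              unsigned int phy_index)
    {
            return test_bit(phy_index, &h->invalid_phy_mask);  /* was: & (1 << phy_index) */
    }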
drivers/scsi/isci/isci.h
... ... @@ -73,11 +73,6 @@
73 73  
74 74 #define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
75 75  
76   -enum sci_controller_mode {
77   - SCI_MODE_SPEED,
78   - SCI_MODE_SIZE /* deprecated */
79   -};
80   -
81 76 #define SCI_MAX_PHYS (4UL)
82 77 #define SCI_MAX_PORTS SCI_MAX_PHYS
83 78 #define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */
drivers/scsi/isci/phy.c
... ... @@ -265,10 +265,11 @@
265 265 * port (i.e. it's contained in the dummy port). !NULL All other
266 266 * values indicate a handle/pointer to the port containing the phy.
267 267 */
268   -struct isci_port *phy_get_non_dummy_port(
269   - struct isci_phy *iphy)
  268 +struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
270 269 {
271   - if (sci_port_get_index(iphy->owning_port) == SCIC_SDS_DUMMY_PORT)
  270 + struct isci_port *iport = iphy->owning_port;
  271 +
  272 + if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
272 273 return NULL;
273 274  
274 275 return iphy->owning_port;
... ... @@ -858,10 +859,9 @@
858 859 struct dev_to_host_fis *frame_header;
859 860 u32 *fis_frame_data;
860 861  
861   - result = sci_unsolicited_frame_control_get_header(
862   - &(sci_phy_get_controller(iphy)->uf_control),
863   - frame_index,
864   - (void **)&frame_header);
  862 + result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
  863 + frame_index,
  864 + (void **)&frame_header);
865 865  
866 866 if (result != SCI_SUCCESS)
867 867 return result;
... ... @@ -1090,6 +1090,8 @@
1090 1090 static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
1091 1091 {
1092 1092 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
  1093 + struct isci_port *iport = iphy->owning_port;
  1094 + struct isci_host *ihost = iport->owning_controller;
1093 1095  
1094 1096 /*
1095 1097 * @todo We need to get to the controller to place this PE in a
1096 1098  
... ... @@ -1100,14 +1102,14 @@
1100 1102 scu_link_layer_stop_protocol_engine(iphy);
1101 1103  
1102 1104 if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
1103   - sci_controller_link_down(sci_phy_get_controller(iphy),
1104   - phy_get_non_dummy_port(iphy),
1105   - iphy);
  1105 + sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
1106 1106 }
1107 1107  
1108 1108 static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
1109 1109 {
1110 1110 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
  1111 + struct isci_port *iport = iphy->owning_port;
  1112 + struct isci_host *ihost = iport->owning_controller;
1111 1113  
1112 1114 scu_link_layer_stop_protocol_engine(iphy);
1113 1115 scu_link_layer_start_oob(iphy);
... ... @@ -1117,9 +1119,7 @@
1117 1119 iphy->bcn_received_while_port_unassigned = false;
1118 1120  
1119 1121 if (iphy->sm.previous_state_id == SCI_PHY_READY)
1120   - sci_controller_link_down(sci_phy_get_controller(iphy),
1121   - phy_get_non_dummy_port(iphy),
1122   - iphy);
  1122 + sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
1123 1123  
1124 1124 sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
1125 1125 }
1126 1126  
... ... @@ -1127,11 +1127,10 @@
1127 1127 static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
1128 1128 {
1129 1129 struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
  1130 + struct isci_port *iport = iphy->owning_port;
  1131 + struct isci_host *ihost = iport->owning_controller;
1130 1132  
1131   - sci_controller_link_up(sci_phy_get_controller(iphy),
1132   - phy_get_non_dummy_port(iphy),
1133   - iphy);
1134   -
  1133 + sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
1135 1134 }
1136 1135  
1137 1136 static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
drivers/scsi/isci/phy.h
... ... @@ -440,23 +440,6 @@
440 440 SCI_PHY_FINAL,
441 441 };
442 442  
443   -/**
444   - * sci_phy_get_index() -
445   - *
446   - * This macro returns the phy index for the specified phy
447   - */
448   -#define sci_phy_get_index(phy) \
449   - ((phy)->phy_index)
450   -
451   -/**
452   - * sci_phy_get_controller() - This macro returns the controller for this
453   - * phy
454   - *
455   - *
456   - */
457   -#define sci_phy_get_controller(phy) \
458   - (sci_port_get_controller((phy)->owning_port))
459   -
460 443 void sci_phy_construct(
461 444 struct isci_phy *iphy,
462 445 struct isci_port *iport,
drivers/scsi/isci/port.c
... ... @@ -654,7 +654,7 @@
654 654 void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
655 655 bool do_notify_user)
656 656 {
657   - struct isci_host *ihost = sci_port_get_controller(iport);
  657 + struct isci_host *ihost = iport->owning_controller;
658 658  
659 659 iport->active_phy_mask &= ~(1 << iphy->phy_index);
660 660  
... ... @@ -678,7 +678,7 @@
678 678 * invalid link.
679 679 */
680 680 if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
681   - sci_controller_set_invalid_phy(ihost, iphy);
  681 + ihost->invalid_phy_mask |= 1 << iphy->phy_index;
682 682 dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
683 683 }
684 684 }
drivers/scsi/isci/port.h
... ... @@ -210,23 +210,6 @@
210 210  
211 211 };
212 212  
213   -/**
214   - * sci_port_get_controller() -
215   - *
216   - * Helper macro to get the owning controller of this port
217   - */
218   -#define sci_port_get_controller(this_port) \
219   - ((this_port)->owning_controller)
220   -
221   -/**
222   - * sci_port_get_index() -
223   - *
224   - * This macro returns the physical port index for this port object
225   - */
226   -#define sci_port_get_index(this_port) \
227   - ((this_port)->physical_port_index)
228   -
229   -
230 213 static inline void sci_port_decrement_request_count(struct isci_port *iport)
231 214 {
232 215 if (WARN_ONCE(iport->started_request_count == 0,
drivers/scsi/isci/port_config.c
... ... @@ -367,10 +367,10 @@
367 367 if (!iport)
368 368 return;
369 369  
370   - port_agent->phy_ready_mask |= (1 << sci_phy_get_index(iphy));
  370 + port_agent->phy_ready_mask |= (1 << iphy->phy_index);
371 371 sci_port_link_up(iport, iphy);
372   - if ((iport->active_phy_mask & (1 << sci_phy_get_index(iphy))))
373   - port_agent->phy_configured_mask |= (1 << sci_phy_get_index(iphy));
  372 + if ((iport->active_phy_mask & (1 << iphy->phy_index)))
  373 + port_agent->phy_configured_mask |= (1 << iphy->phy_index);
374 374 }
375 375  
376 376 /**
... ... @@ -404,10 +404,8 @@
404 404 * rebuilding the port with the phys that remain in the ready
405 405 * state.
406 406 */
407   - port_agent->phy_ready_mask &=
408   - ~(1 << sci_phy_get_index(iphy));
409   - port_agent->phy_configured_mask &=
410   - ~(1 << sci_phy_get_index(iphy));
  407 + port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
  408 + port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
411 409  
412 410 /*
413 411 * Check to see if there are more phys waiting to be
... ... @@ -643,7 +641,7 @@
643 641 struct isci_port *iport,
644 642 struct isci_phy *iphy)
645 643 {
646   - port_agent->phy_ready_mask &= ~(1 << sci_phy_get_index(iphy));
  644 + port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
647 645  
648 646 if (!iport)
649 647 return;
drivers/scsi/isci/remote_device.c
... ... @@ -456,7 +456,7 @@
456 456 sci_port_complete_io(iport, idev, ireq);
457 457 else {
458 458 kref_get(&idev->kref);
459   - sci_remote_device_increment_request_count(idev);
  459 + idev->started_request_count++;
460 460 }
461 461 }
462 462  
... ... @@ -636,7 +636,7 @@
636 636 * status of "DEVICE_RESET_REQUIRED", instead of "INVALID STATE".
637 637 */
638 638 sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
639   - } else if (sci_remote_device_get_request_count(idev) == 0)
  639 + } else if (idev->started_request_count == 0)
640 640 sci_change_state(sm, SCI_STP_DEV_IDLE);
641 641 break;
642 642 case SCI_SMP_DEV_CMD:
643 643  
... ... @@ -650,10 +650,10 @@
650 650 if (status != SCI_SUCCESS)
651 651 break;
652 652  
653   - if (sci_remote_device_get_request_count(idev) == 0)
  653 + if (idev->started_request_count == 0)
654 654 sci_remote_node_context_destruct(&idev->rnc,
655   - rnc_destruct_done,
656   - idev);
  655 + rnc_destruct_done,
  656 + idev);
657 657 break;
658 658 }
659 659  
660 660  
661 661  
662 662  
... ... @@ -761,26 +761,17 @@
761 761 return status;
762 762 }
763 763  
764   -/**
765   - *
766   - * @sci_dev:
767   - * @request:
768   - *
769   - * This method takes the request and bulids an appropriate SCU context for the
770   - * request and then requests the controller to post the request. none
771   - */
772   -void sci_remote_device_post_request(
773   - struct isci_remote_device *idev,
774   - u32 request)
  764 +void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
775 765 {
  766 + struct isci_port *iport = idev->owning_port;
776 767 u32 context;
777 768  
778   - context = sci_remote_device_build_command_context(idev, request);
  769 + context = request |
  770 + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
  771 + (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
  772 + idev->rnc.remote_node_index;
779 773  
780   - sci_controller_post_request(
781   - sci_remote_device_get_controller(idev),
782   - context
783   - );
  774 + sci_controller_post_request(iport->owning_controller, context);
784 775 }
785 776  
786 777 /* called once the remote node context has transisitioned to a
... ... @@ -893,7 +884,7 @@
893 884 static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
894 885 {
895 886 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
896   - struct isci_host *ihost = sci_remote_device_get_controller(idev);
  887 + struct isci_host *ihost = idev->owning_port->owning_controller;
897 888  
898 889 isci_remote_device_not_ready(ihost, idev,
899 890 SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
... ... @@ -961,7 +952,7 @@
961 952 static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
962 953 {
963 954 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
964   - struct isci_host *ihost = sci_remote_device_get_controller(idev);
  955 + struct isci_host *ihost = idev->owning_port->owning_controller;
965 956  
966 957 BUG_ON(idev->working_request == NULL);
967 958  
... ... @@ -972,7 +963,7 @@
972 963 static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
973 964 {
974 965 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
975   - struct isci_host *ihost = sci_remote_device_get_controller(idev);
  966 + struct isci_host *ihost = idev->owning_port->owning_controller;
976 967  
977 968 if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
978 969 isci_remote_device_not_ready(ihost, idev,
... ... @@ -982,7 +973,7 @@
982 973 static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
983 974 {
984 975 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
985   - struct isci_host *ihost = sci_remote_device_get_controller(idev);
  976 + struct isci_host *ihost = idev->owning_port->owning_controller;
986 977  
987 978 isci_remote_device_ready(ihost, idev);
988 979 }
... ... @@ -990,7 +981,7 @@
990 981 static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
991 982 {
992 983 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
993   - struct isci_host *ihost = sci_remote_device_get_controller(idev);
  984 + struct isci_host *ihost = idev->owning_port->owning_controller;
994 985  
995 986 BUG_ON(idev->working_request == NULL);
996 987  
drivers/scsi/isci/remote_device.h
... ... @@ -305,91 +305,18 @@
305 305 return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
306 306 }
307 307  
308   -/**
309   - * sci_remote_device_increment_request_count() -
310   - *
311   - * This macro incrments the request count for this device
312   - */
313   -#define sci_remote_device_increment_request_count(idev) \
314   - ((idev)->started_request_count++)
315   -
316   -/**
317   - * sci_remote_device_decrement_request_count() -
318   - *
319   - * This macro decrements the request count for this device. This count will
320   - * never decrment past 0.
321   - */
322   -#define sci_remote_device_decrement_request_count(idev) \
323   - ((idev)->started_request_count > 0 ? \
324   - (idev)->started_request_count-- : 0)
325   -
326   -/**
327   - * sci_remote_device_get_request_count() -
328   - *
329   - * This is a helper macro to return the current device request count.
330   - */
331   -#define sci_remote_device_get_request_count(idev) \
332   - ((idev)->started_request_count)
333   -
334   -/**
335   - * sci_remote_device_get_controller() -
336   - *
337   - * This macro returns the controller object that contains this device object
338   - */
339   -#define sci_remote_device_get_controller(idev) \
340   - sci_port_get_controller(sci_remote_device_get_port(idev))
341   -
342   -/**
343   - * sci_remote_device_get_port() -
344   - *
345   - * This macro returns the owning port of this device
346   - */
347   -#define sci_remote_device_get_port(idev) \
348   - ((idev)->owning_port)
349   -
350   -/**
351   - * sci_remote_device_get_controller_peg() -
352   - *
353   - * This macro returns the controllers protocol engine group
354   - */
355   -#define sci_remote_device_get_controller_peg(idev) \
356   - (\
357   - sci_controller_get_protocol_engine_group(\
358   - sci_port_get_controller(\
359   - sci_remote_device_get_port(idev) \
360   - ) \
361   - ) \
362   - )
363   -
364   -/**
365   - * sci_remote_device_get_index() -
366   - *
367   - * This macro returns the remote node index for this device object
368   - */
369   -#define sci_remote_device_get_index(idev) \
370   - ((idev)->rnc.remote_node_index)
371   -
372   -/**
373   - * sci_remote_device_build_command_context() -
374   - *
375   - * This macro builds a remote device context for the SCU post request operation
376   - */
377   -#define sci_remote_device_build_command_context(device, command) \
378   - ((command) \
379   - | (sci_remote_device_get_controller_peg((device)) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) \
380   - | ((device)->owning_port->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) \
381   - | (sci_remote_device_get_index((device))) \
382   - )
383   -
384   -/**
385   - * sci_remote_device_set_working_request() -
386   - *
387   - * This macro makes the working request assingment for the remote device
388   - * object. To clear the working request use this macro with a NULL request
389   - * object.
390   - */
391   -#define sci_remote_device_set_working_request(device, request) \
392   - ((device)->working_request = (request))
  308 +static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
  309 +{
  310 + /* XXX delete this voodoo when converting to the top-level device
  311 + * reference count
  312 + */
  313 + if (WARN_ONCE(idev->started_request_count == 0,
  314 + "%s: tried to decrement started_request_count past 0!?",
  315 + __func__))
  316 + /* pass */;
  317 + else
  318 + idev->started_request_count--;
  319 +}
393 320  
394 321 enum sci_status sci_remote_device_frame_handler(
395 322 struct isci_remote_device *idev,
drivers/scsi/isci/remote_node_context.c
... ... @@ -111,7 +111,7 @@
111 111 struct isci_host *ihost;
112 112 __le64 sas_addr;
113 113  
114   - ihost = sci_remote_device_get_controller(idev);
  114 + ihost = idev->owning_port->owning_controller;
115 115 rnc = sci_rnc_by_id(ihost, rni);
116 116  
117 117 memset(rnc, 0, sizeof(union scu_remote_node_context)
drivers/scsi/isci/remote_node_context.h
... ... @@ -204,9 +204,6 @@
204 204 bool sci_remote_node_context_is_ready(
205 205 struct sci_remote_node_context *sci_rnc);
206 206  
207   -#define sci_remote_node_context_get_remote_node_index(rcn) \
208   - ((rnc)->remote_node_index)
209   -
210 207 enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
211 208 u32 event_code);
212 209 enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
drivers/scsi/isci/request.c
... ... @@ -211,22 +211,21 @@
211 211 struct isci_remote_device *idev;
212 212 struct isci_port *iport;
213 213  
214   - idev = sci_request_get_device(ireq);
215   - iport = sci_request_get_port(ireq);
  214 + idev = ireq->target_device;
  215 + iport = idev->owning_port;
216 216  
217 217 /* Fill in the TC with the its required data */
218 218 task_context->abort = 0;
219 219 task_context->priority = 0;
220 220 task_context->initiator_request = 1;
221 221 task_context->connection_rate = idev->connection_rate;
222   - task_context->protocol_engine_index =
223   - sci_controller_get_protocol_engine_group(controller);
224   - task_context->logical_port_index = sci_port_get_index(iport);
  222 + task_context->protocol_engine_index = ISCI_PEG;
  223 + task_context->logical_port_index = iport->physical_port_index;
225 224 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
226 225 task_context->valid = SCU_TASK_CONTEXT_VALID;
227 226 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
228 227  
229   - task_context->remote_node_index = sci_remote_device_get_index(idev);
  228 + task_context->remote_node_index = idev->rnc.remote_node_index;
230 229 task_context->command_code = 0;
231 230  
232 231 task_context->link_layer_control = 0;
... ... @@ -242,9 +241,8 @@
242 241 task_context->task_phase = 0x01;
243 242  
244 243 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
245   - (sci_controller_get_protocol_engine_group(controller) <<
246   - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
247   - (sci_port_get_index(iport) <<
  244 + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
  245 + (iport->physical_port_index <<
248 246 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
249 247 ISCI_TAG_TCI(ireq->io_tag));
250 248  
251 249  
252 250  
... ... @@ -349,23 +347,21 @@
349 347 struct isci_remote_device *idev;
350 348 struct isci_port *iport;
351 349  
352   - idev = sci_request_get_device(ireq);
353   - iport = sci_request_get_port(ireq);
  350 + idev = ireq->target_device;
  351 + iport = idev->owning_port;
354 352  
355 353 /* Fill in the TC with the its required data */
356 354 task_context->abort = 0;
357 355 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
358 356 task_context->initiator_request = 1;
359 357 task_context->connection_rate = idev->connection_rate;
360   - task_context->protocol_engine_index =
361   - sci_controller_get_protocol_engine_group(controller);
362   - task_context->logical_port_index =
363   - sci_port_get_index(iport);
  358 + task_context->protocol_engine_index = ISCI_PEG;
  359 + task_context->logical_port_index = iport->physical_port_index;
364 360 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
365 361 task_context->valid = SCU_TASK_CONTEXT_VALID;
366 362 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
367 363  
368   - task_context->remote_node_index = sci_remote_device_get_index(idev);
  364 + task_context->remote_node_index = idev->rnc.remote_node_index;
369 365 task_context->command_code = 0;
370 366  
371 367 task_context->link_layer_control = 0;
... ... @@ -385,11 +381,10 @@
385 381 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
386 382  
387 383 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
388   - (sci_controller_get_protocol_engine_group(controller) <<
389   - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
390   - (sci_port_get_index(iport) <<
391   - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
392   - ISCI_TAG_TCI(ireq->io_tag));
  384 + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
  385 + (iport->physical_port_index <<
  386 + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
  387 + ISCI_TAG_TCI(ireq->io_tag));
393 388 /*
394 389 * Copy the physical address for the command buffer to the SCU Task
395 390 * Context. We must offset the command buffer by 4 bytes because the
... ... @@ -716,10 +711,8 @@
716 711  
717 712 switch (state) {
718 713 case SCI_REQ_CONSTRUCTED:
719   - sci_request_set_status(ireq,
720   - SCU_TASK_DONE_TASK_ABORT,
721   - SCI_FAILURE_IO_TERMINATED);
722   -
  714 + ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
  715 + ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
723 716 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
724 717 return SCI_SUCCESS;
725 718 case SCI_REQ_STARTED:
... ... @@ -848,9 +841,8 @@
848 841 */
849 842 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
850 843 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
851   - sci_request_set_status(ireq,
852   - SCU_TASK_DONE_GOOD,
853   - SCI_SUCCESS);
  844 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  845 + ireq->sci_status = SCI_SUCCESS;
854 846 break;
855 847 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
856 848 /* There are times when the SCU hardware will return an early
857 849  
... ... @@ -868,13 +860,11 @@
868 860 word_cnt);
869 861  
870 862 if (resp->status == 0) {
871   - sci_request_set_status(ireq,
872   - SCU_TASK_DONE_GOOD,
873   - SCI_SUCCESS_IO_DONE_EARLY);
  863 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  864 + ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
874 865 } else {
875   - sci_request_set_status(ireq,
876   - SCU_TASK_DONE_CHECK_RESPONSE,
877   - SCI_FAILURE_IO_RESPONSE_VALID);
  866 + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
  867 + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
878 868 }
879 869 break;
880 870 }
... ... @@ -885,9 +875,8 @@
885 875 &ireq->ssp.rsp,
886 876 word_cnt);
887 877  
888   - sci_request_set_status(ireq,
889   - SCU_TASK_DONE_CHECK_RESPONSE,
890   - SCI_FAILURE_IO_RESPONSE_VALID);
  878 + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
  879 + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
891 880 break;
892 881 }
893 882  
... ... @@ -900,13 +889,12 @@
900 889 datapres = resp_iu->datapres;
901 890  
902 891 if (datapres == 1 || datapres == 2) {
903   - sci_request_set_status(ireq,
904   - SCU_TASK_DONE_CHECK_RESPONSE,
905   - SCI_FAILURE_IO_RESPONSE_VALID);
906   - } else
907   - sci_request_set_status(ireq,
908   - SCU_TASK_DONE_GOOD,
909   - SCI_SUCCESS);
  892 + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
  893 + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
  894 + } else {
  895 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  896 + ireq->sci_status = SCI_SUCCESS;
  897 + }
910 898 break;
911 899 /* only stp device gets suspended. */
912 900 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
913 901  
... ... @@ -921,15 +909,13 @@
921 909 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
922 910 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
923 911 if (ireq->protocol == SCIC_STP_PROTOCOL) {
924   - sci_request_set_status(ireq,
925   - SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
926   - SCU_COMPLETION_TL_STATUS_SHIFT,
927   - SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
  912 + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
  913 + SCU_COMPLETION_TL_STATUS_SHIFT;
  914 + ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
928 915 } else {
929   - sci_request_set_status(ireq,
930   - SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
931   - SCU_COMPLETION_TL_STATUS_SHIFT,
932   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
  916 + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
  917 + SCU_COMPLETION_TL_STATUS_SHIFT;
  918 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
933 919 }
934 920 break;
935 921  
... ... @@ -944,10 +930,9 @@
944 930 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
945 931 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
946 932 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
947   - sci_request_set_status(ireq,
948   - SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
949   - SCU_COMPLETION_TL_STATUS_SHIFT,
950   - SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
  933 + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
  934 + SCU_COMPLETION_TL_STATUS_SHIFT;
  935 + ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
951 936 break;
952 937  
953 938 /* neither ssp nor stp gets suspended. */
... ... @@ -967,11 +952,9 @@
967 952 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
968 953 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
969 954 default:
970   - sci_request_set_status(
971   - ireq,
972   - SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
973   - SCU_COMPLETION_TL_STATUS_SHIFT,
974   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
  955 + ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
  956 + SCU_COMPLETION_TL_STATUS_SHIFT;
  957 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
975 958 break;
976 959 }
977 960  
... ... @@ -991,9 +974,8 @@
991 974 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
992 975 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
993 976 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
994   - sci_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT,
995   - SCI_FAILURE_IO_TERMINATED);
996   -
  977 + ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
  978 + ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
997 979 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
998 980 break;
999 981  
... ... @@ -1012,9 +994,8 @@
1012 994 {
1013 995 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1014 996 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1015   - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1016   - SCI_SUCCESS);
1017   -
  997 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  998 + ireq->sci_status = SCI_SUCCESS;
1018 999 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1019 1000 break;
1020 1001 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
... ... @@ -1036,10 +1017,8 @@
1036 1017 * If a NAK was received, then it is up to the user to retry
1037 1018 * the request.
1038 1019 */
1039   - sci_request_set_status(ireq,
1040   - SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1041   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1042   -
  1020 + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
  1021 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1043 1022 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1044 1023 break;
1045 1024 }
1046 1025  
... ... @@ -1057,12 +1036,10 @@
1057 1036 * unexpected. but if the TC has success status, we
1058 1037 * complete the IO anyway.
1059 1038 */
1060   - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1061   - SCI_SUCCESS);
1062   -
  1039 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  1040 + ireq->sci_status = SCI_SUCCESS;
1063 1041 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1064 1042 break;
1065   -
1066 1043 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1067 1044 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1068 1045 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1069 1046  
1070 1047  
... ... @@ -1074,20 +1051,16 @@
1074 1051 * these SMP_XXX_XX_ERR status. For these type of error,
1075 1052 * we ask ihost user to retry the request.
1076 1053 */
1077   - sci_request_set_status(ireq, SCU_TASK_DONE_SMP_RESP_TO_ERR,
1078   - SCI_FAILURE_RETRY_REQUIRED);
1079   -
  1054 + ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
  1055 + ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
1080 1056 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1081 1057 break;
1082   -
1083 1058 default:
1084 1059 /* All other completion status cause the IO to be complete. If a NAK
1085 1060 * was received, then it is up to the user to retry the request
1086 1061 */
1087   - sci_request_set_status(ireq,
1088   - SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1089   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1090   -
  1062 + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
  1063 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1091 1064 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1092 1065 break;
1093 1066 }
... ... @@ -1101,9 +1074,8 @@
1101 1074 {
1102 1075 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1103 1076 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1104   - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1105   - SCI_SUCCESS);
1106   -
  1077 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  1078 + ireq->sci_status = SCI_SUCCESS;
1107 1079 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1108 1080 break;
1109 1081 default:
... ... @@ -1111,10 +1083,8 @@
1111 1083 * complete. If a NAK was received, then it is up to
1112 1084 * the user to retry the request.
1113 1085 */
1114   - sci_request_set_status(ireq,
1115   - SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1116   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1117   -
  1086 + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
  1087 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1118 1088 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1119 1089 break;
1120 1090 }
... ... @@ -1171,9 +1141,8 @@
1171 1141 {
1172 1142 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1173 1143 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1174   - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1175   - SCI_SUCCESS);
1176   -
  1144 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  1145 + ireq->sci_status = SCI_SUCCESS;
1177 1146 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
1178 1147 break;
1179 1148  
... ... @@ -1182,10 +1151,8 @@
1182 1151 * complete. If a NAK was received, then it is up to
1183 1152 * the user to retry the request.
1184 1153 */
1185   - sci_request_set_status(ireq,
1186   - SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1187   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1188   -
  1154 + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
  1155 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1189 1156 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1190 1157 break;
1191 1158 }
... ... @@ -1363,10 +1330,8 @@
1363 1330  
1364 1331 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1365 1332 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1366   - sci_request_set_status(ireq,
1367   - SCU_TASK_DONE_GOOD,
1368   - SCI_SUCCESS);
1369   -
  1333 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  1334 + ireq->sci_status = SCI_SUCCESS;
1370 1335 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1371 1336 break;
1372 1337  
... ... @@ -1375,10 +1340,8 @@
1375 1340 * complete. If a NAK was received, then it is up to
1376 1341 * the user to retry the request.
1377 1342 */
1378   - sci_request_set_status(ireq,
1379   - SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1380   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1381   -
  1343 + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
  1344 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1382 1345 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1383 1346 break;
1384 1347 }
... ... @@ -1426,11 +1389,8 @@
1426 1389 * If a NAK was received, then it is up to the user to retry
1427 1390 * the request.
1428 1391 */
1429   - sci_request_set_status(
1430   - ireq,
1431   - SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1432   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1433   -
  1392 + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
  1393 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1434 1394 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1435 1395 break;
1436 1396 }
... ... @@ -1438,15 +1398,6 @@
1438 1398 return status;
1439 1399 }
1440 1400  
1441   -static void sci_stp_request_udma_complete_request(
1442   - struct isci_request *ireq,
1443   - u32 scu_status,
1444   - enum sci_status sci_status)
1445   -{
1446   - sci_request_set_status(ireq, scu_status, sci_status);
1447   - sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1448   -}
1449   -
1450 1401 static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
1451 1402 u32 frame_index)
1452 1403 {
... ... @@ -1512,13 +1463,12 @@
1512 1463  
1513 1464 if (resp_iu->datapres == 0x01 ||
1514 1465 resp_iu->datapres == 0x02) {
1515   - sci_request_set_status(ireq,
1516   - SCU_TASK_DONE_CHECK_RESPONSE,
1517   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1518   - } else
1519   - sci_request_set_status(ireq,
1520   - SCU_TASK_DONE_GOOD,
1521   - SCI_SUCCESS);
  1466 + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
  1467 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
  1468 + } else {
  1469 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  1470 + ireq->sci_status = SCI_SUCCESS;
  1471 + }
1522 1472 } else {
1523 1473 /* not a response frame, why did it get forwarded? */
1524 1474 dev_err(&ihost->pdev->dev,
... ... @@ -1567,9 +1517,8 @@
1567 1517 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1568 1518 smp_resp, word_cnt);
1569 1519  
1570   - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1571   - SCI_SUCCESS);
1572   -
  1520 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  1521 + ireq->sci_status = SCI_SUCCESS;
1573 1522 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1574 1523 } else {
1575 1524 /*
... ... @@ -1584,10 +1533,8 @@
1584 1533 frame_index,
1585 1534 rsp_hdr->frame_type);
1586 1535  
1587   - sci_request_set_status(ireq,
1588   - SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1589   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1590   -
  1536 + ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
  1537 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1591 1538 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1592 1539 }
1593 1540  
1594 1541  
... ... @@ -1602,16 +1549,14 @@
1602 1549  
1603 1550 case SCI_REQ_STP_UDMA_WAIT_D2H:
1604 1551 /* Use the general frame handler to copy the resposne data */
1605   - status = sci_stp_request_udma_general_frame_handler(ireq,
1606   - frame_index);
  1552 + status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
1607 1553  
1608 1554 if (status != SCI_SUCCESS)
1609 1555 return status;
1610 1556  
1611   - sci_stp_request_udma_complete_request(ireq,
1612   - SCU_TASK_DONE_CHECK_RESPONSE,
1613   - SCI_FAILURE_IO_RESPONSE_VALID);
1614   -
  1557 + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
  1558 + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
  1559 + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1615 1560 return SCI_SUCCESS;
1616 1561  
1617 1562 case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
... ... @@ -1645,8 +1590,8 @@
1645 1590 frame_buffer);
1646 1591  
1647 1592 /* The command has completed with error */
1648   - sci_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE,
1649   - SCI_FAILURE_IO_RESPONSE_VALID);
  1593 + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
  1594 + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1650 1595 break;
1651 1596  
1652 1597 default:
... ... @@ -1655,8 +1600,8 @@
1655 1600 "violation occurred\n", __func__, stp_req,
1656 1601 frame_index);
1657 1602  
1658   - sci_request_set_status(ireq, SCU_TASK_DONE_UNEXP_FIS,
1659   - SCI_FAILURE_PROTOCOL_VIOLATION);
  1603 + ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
  1604 + ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1660 1605 break;
1661 1606 }
1662 1607  
... ... @@ -1753,10 +1698,8 @@
1753 1698 frame_header,
1754 1699 frame_buffer);
1755 1700  
1756   - sci_request_set_status(ireq,
1757   - SCU_TASK_DONE_CHECK_RESPONSE,
1758   - SCI_FAILURE_IO_RESPONSE_VALID);
1759   -
  1701 + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
  1702 + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1760 1703 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1761 1704 break;
1762 1705  
... ... @@ -1800,10 +1743,8 @@
1800 1743 frame_index,
1801 1744 frame_header->fis_type);
1802 1745  
1803   - sci_request_set_status(ireq,
1804   - SCU_TASK_DONE_GOOD,
1805   - SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
1806   -
  1746 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  1747 + ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
1807 1748 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1808 1749  
1809 1750 /* Frame is decoded return it to the controller */
... ... @@ -1833,10 +1774,8 @@
1833 1774 return status;
1834 1775  
1835 1776 if ((stp_req->status & ATA_BUSY) == 0) {
1836   - sci_request_set_status(ireq,
1837   - SCU_TASK_DONE_CHECK_RESPONSE,
1838   - SCI_FAILURE_IO_RESPONSE_VALID);
1839   -
  1777 + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
  1778 + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1840 1779 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1841 1780 } else {
1842 1781 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
... ... @@ -1873,9 +1812,8 @@
1873 1812 frame_buffer);
1874 1813  
1875 1814 /* The command has completed with error */
1876   - sci_request_set_status(ireq,
1877   - SCU_TASK_DONE_CHECK_RESPONSE,
1878   - SCI_FAILURE_IO_RESPONSE_VALID);
  1815 + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
  1816 + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1879 1817 break;
1880 1818  
1881 1819 default:
... ... @@ -1886,9 +1824,8 @@
1886 1824 stp_req,
1887 1825 frame_index);
1888 1826  
1889   - sci_request_set_status(ireq,
1890   - SCU_TASK_DONE_UNEXP_FIS,
1891   - SCI_FAILURE_PROTOCOL_VIOLATION);
  1827 + ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
  1828 + ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1892 1829 break;
1893 1830 }
1894 1831  
... ... @@ -1927,9 +1864,9 @@
1927 1864  
1928 1865 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1929 1866 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1930   - sci_stp_request_udma_complete_request(ireq,
1931   - SCU_TASK_DONE_GOOD,
1932   - SCI_SUCCESS);
  1867 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  1868 + ireq->sci_status = SCI_SUCCESS;
  1869 + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1933 1870 break;
1934 1871 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
1935 1872 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
... ... @@ -1941,9 +1878,9 @@
1941 1878 sci_remote_device_suspend(ireq->target_device,
1942 1879 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1943 1880  
1944   - sci_stp_request_udma_complete_request(ireq,
1945   - SCU_TASK_DONE_CHECK_RESPONSE,
1946   - SCI_FAILURE_IO_RESPONSE_VALID);
  1881 + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
  1882 + ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
  1883 + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1947 1884 } else {
1948 1885 /* If we have an error completion status for the
1949 1886 * TC then we can expect a D2H register FIS from
... ... @@ -1970,9 +1907,9 @@
1970 1907 /* Fall through to the default case */
1971 1908 default:
1972 1909 /* All other completion status cause the IO to be complete. */
1973   - sci_stp_request_udma_complete_request(ireq,
1974   - SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1975   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
  1910 + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
  1911 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
  1912 + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1976 1913 break;
1977 1914 }
1978 1915  
... ... @@ -1985,9 +1922,8 @@
1985 1922 {
1986 1923 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1987 1924 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1988   - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
1989   - SCI_SUCCESS);
1990   -
  1925 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  1926 + ireq->sci_status = SCI_SUCCESS;
1991 1927 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
1992 1928 break;
1993 1929  
... ... @@ -1997,10 +1933,8 @@
1997 1933 * If a NAK was received, then it is up to the user to retry
1998 1934 * the request.
1999 1935 */
2000   - sci_request_set_status(ireq,
2001   - SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2002   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2003   -
  1936 + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
  1937 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2004 1938 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2005 1939 break;
2006 1940 }
... ... @@ -2014,9 +1948,8 @@
2014 1948 {
2015 1949 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2016 1950 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2017   - sci_request_set_status(ireq, SCU_TASK_DONE_GOOD,
2018   - SCI_SUCCESS);
2019   -
  1951 + ireq->scu_status = SCU_TASK_DONE_GOOD;
  1952 + ireq->sci_status = SCI_SUCCESS;
2020 1953 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2021 1954 break;
2022 1955  
... ... @@ -2025,10 +1958,8 @@
2025 1958 * a NAK was received, then it is up to the user to retry the
2026 1959 * request.
2027 1960 */
2028   - sci_request_set_status(ireq,
2029   - SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2030   - SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2031   -
  1961 + ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
  1962 + ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2032 1963 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2033 1964 break;
2034 1965 }
... ... @@ -2504,7 +2435,7 @@
2504 2435 completion_status);
2505 2436  
2506 2437 spin_lock(&request->state_lock);
2507   - request_status = isci_request_get_state(request);
  2438 + request_status = request->status;
2508 2439  
2509 2440 /* Decode the request status. Note that if the request has been
2510 2441 * aborted by a task management function, we don't care
2511 2442  
2512 2443  
... ... @@ -2904,24 +2835,21 @@
2904 2835 {
2905 2836 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2906 2837  
2907   - sci_remote_device_set_working_request(ireq->target_device,
2908   - ireq);
  2838 + ireq->target_device->working_request = ireq;
2909 2839 }
2910 2840  
2911 2841 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
2912 2842 {
2913 2843 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2914 2844  
2915   - sci_remote_device_set_working_request(ireq->target_device,
2916   - ireq);
  2845 + ireq->target_device->working_request = ireq;
2917 2846 }
2918 2847  
2919 2848 static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
2920 2849 {
2921 2850 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2922 2851  
2923   - sci_remote_device_set_working_request(ireq->target_device,
2924   - ireq);
  2852 + ireq->target_device->working_request = ireq;
2925 2853 }
2926 2854  
2927 2855 static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
... ... @@ -3141,8 +3069,8 @@
3141 3069  
3142 3070 task_context = ireq->tc;
3143 3071  
3144   - idev = sci_request_get_device(ireq);
3145   - iport = sci_request_get_port(ireq);
  3072 + idev = ireq->target_device;
  3073 + iport = idev->owning_port;
3146 3074  
3147 3075 /*
3148 3076 * Fill in the TC with the its required data
... ... @@ -3151,9 +3079,8 @@
3151 3079 task_context->priority = 0;
3152 3080 task_context->initiator_request = 1;
3153 3081 task_context->connection_rate = idev->connection_rate;
3154   - task_context->protocol_engine_index =
3155   - sci_controller_get_protocol_engine_group(ihost);
3156   - task_context->logical_port_index = sci_port_get_index(iport);
  3082 + task_context->protocol_engine_index = ISCI_PEG;
  3083 + task_context->logical_port_index = iport->physical_port_index;
3157 3084 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3158 3085 task_context->abort = 0;
3159 3086 task_context->valid = SCU_TASK_CONTEXT_VALID;
... ... @@ -3195,11 +3122,10 @@
3195 3122 task_context->task_phase = 0;
3196 3123  
3197 3124 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3198   - (sci_controller_get_protocol_engine_group(ihost) <<
3199   - SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3200   - (sci_port_get_index(iport) <<
3201   - SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3202   - ISCI_TAG_TCI(ireq->io_tag));
  3125 + (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
  3126 + (iport->physical_port_index <<
  3127 + SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
  3128 + ISCI_TAG_TCI(ireq->io_tag));
3203 3129 /*
3204 3130 * Copy the physical address for the command buffer to the SCU Task
3205 3131 * Context command buffer should not contain command header.
drivers/scsi/isci/request.h
... ... @@ -300,58 +300,6 @@
300 300 SCI_REQ_FINAL,
301 301 };
302 302  
303   -/**
304   - * sci_request_get_controller() -
305   - *
306   - * This macro will return the controller for this io request object
307   - */
308   -#define sci_request_get_controller(ireq) \
309   - ((ireq)->owning_controller)
310   -
311   -/**
312   - * sci_request_get_device() -
313   - *
314   - * This macro will return the device for this io request object
315   - */
316   -#define sci_request_get_device(ireq) \
317   - ((ireq)->target_device)
318   -
319   -/**
320   - * sci_request_get_port() -
321   - *
322   - * This macro will return the port for this io request object
323   - */
324   -#define sci_request_get_port(ireq) \
325   - sci_remote_device_get_port(sci_request_get_device(ireq))
326   -
327   -/**
328   - * sci_request_get_post_context() -
329   - *
330   - * This macro returns the constructed post context result for the io request.
331   - */
332   -#define sci_request_get_post_context(ireq) \
333   - ((ireq)->post_context)
334   -
335   -/**
336   - * sci_request_get_task_context() -
337   - *
338   - * This is a helper macro to return the os handle for this request object.
339   - */
340   -#define sci_request_get_task_context(request) \
341   - ((request)->task_context_buffer)
342   -
343   -/**
344   - * sci_request_set_status() -
345   - *
346   - * This macro will set the scu hardware status and sci request completion
347   - * status for an io request.
348   - */
349   -#define sci_request_set_status(request, scu_status_code, sci_status_code) \
350   - { \
351   - (request)->scu_status = (scu_status_code); \
352   - (request)->sci_status = (sci_status_code); \
353   - }
354   -
355 303 enum sci_status sci_request_start(struct isci_request *ireq);
356 304 enum sci_status sci_io_request_terminate(struct isci_request *ireq);
357 305 enum sci_status
... ... @@ -380,27 +328,6 @@
380 328  
381 329 return ireq->request_daddr + (requested_addr - base_addr);
382 330 }
383   -
384   -/**
385   - * This function gets the status of the request object.
386   - * @request: This parameter points to the isci_request object
387   - *
388   - * status of the object as a isci_request_status enum.
389   - */
390   -static inline enum isci_request_status
391   -isci_request_get_state(struct isci_request *isci_request)
392   -{
393   - BUG_ON(isci_request == NULL);
394   -
395   - /*probably a bad sign... */
396   - if (isci_request->status == unallocated)
397   - dev_warn(&isci_request->isci_host->pdev->dev,
398   - "%s: isci_request->status == unallocated\n",
399   - __func__);
400   -
401   - return isci_request->status;
402   -}
403   -
404 331  
405 332 /**
406 333 * isci_request_change_state() - This function sets the status of the request
drivers/scsi/isci/task.c
... ... @@ -654,7 +654,7 @@
654 654 * needs to be detached and freed here.
655 655 */
656 656 spin_lock_irqsave(&isci_request->state_lock, flags);
657   - request_status = isci_request_get_state(isci_request);
  657 + request_status = isci_request->status;
658 658  
659 659 if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
660 660 && ((request_status == aborted)