Commit d70f9a3cc32ca515de4048adb544c997af9f2250
Committed by
Greg Kroah-Hartman
1 parent
475b8d6192
net: hns3: fix aRFS FD rules leftover after add a user FD rule
[ Upstream commit efe3fa45f770f1d66e2734ee7a3523c75694ff04 ] When the user has created an FD rule, all the aRFS rules should be cleared up. HNS3 processes the flow as below: 1. take the spin lock protecting the fd rule list 2. clear up all aRFS rules 3. release the lock 4. take the spin lock protecting the fd rule list again 5. create a rule 6. release the lock. There is a short window between step 3 and step 4 in which new aRFS FD rules could be created if the driver is receiving packets. So refactor the use of fd_rule_lock to fix it. Fixes: 441228875706 ("net: hns3: refine the flow director handle") Signed-off-by: Guojia Liao <liaoguojia@huawei.com> Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com> Signed-off-by: David S. Miller <davem@davemloft.net> Signed-off-by: Sasha Levin <sashal@kernel.org>
Showing 1 changed file with 15 additions and 13 deletions Side-by-side Diff
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
... | ... | @@ -5627,9 +5627,9 @@ |
5627 | 5627 | /* to avoid rule conflict, when user configure rule by ethtool, |
5628 | 5628 | * we need to clear all arfs rules |
5629 | 5629 | */ |
5630 | + spin_lock_bh(&hdev->fd_rule_lock); | |
5630 | 5631 | hclge_clear_arfs_rules(handle); |
5631 | 5632 | |
5632 | - spin_lock_bh(&hdev->fd_rule_lock); | |
5633 | 5633 | ret = hclge_fd_config_rule(hdev, rule); |
5634 | 5634 | |
5635 | 5635 | spin_unlock_bh(&hdev->fd_rule_lock); |
... | ... | @@ -5672,6 +5672,7 @@ |
5672 | 5672 | return ret; |
5673 | 5673 | } |
5674 | 5674 | |
5675 | +/* make sure being called after lock up with fd_rule_lock */ | |
5675 | 5676 | static void hclge_del_all_fd_entries(struct hnae3_handle *handle, |
5676 | 5677 | bool clear_list) |
5677 | 5678 | { |
... | ... | @@ -5684,7 +5685,6 @@ |
5684 | 5685 | if (!hnae3_dev_fd_supported(hdev)) |
5685 | 5686 | return; |
5686 | 5687 | |
5687 | - spin_lock_bh(&hdev->fd_rule_lock); | |
5688 | 5688 | for_each_set_bit(location, hdev->fd_bmap, |
5689 | 5689 | hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) |
5690 | 5690 | hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, |
... | ... | @@ -5701,8 +5701,6 @@ |
5701 | 5701 | bitmap_zero(hdev->fd_bmap, |
5702 | 5702 | hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); |
5703 | 5703 | } |
5704 | - | |
5705 | - spin_unlock_bh(&hdev->fd_rule_lock); | |
5706 | 5704 | } |
5707 | 5705 | |
5708 | 5706 | static int hclge_restore_fd_entries(struct hnae3_handle *handle) |
... | ... | @@ -6069,7 +6067,7 @@ |
6069 | 6067 | u16 flow_id, struct flow_keys *fkeys) |
6070 | 6068 | { |
6071 | 6069 | struct hclge_vport *vport = hclge_get_vport(handle); |
6072 | - struct hclge_fd_rule_tuples new_tuples; | |
6070 | + struct hclge_fd_rule_tuples new_tuples = {}; | |
6073 | 6071 | struct hclge_dev *hdev = vport->back; |
6074 | 6072 | struct hclge_fd_rule *rule; |
6075 | 6073 | u16 tmp_queue_id; |
6076 | 6074 | |
6077 | 6075 | |
... | ... | @@ -6079,20 +6077,18 @@ |
6079 | 6077 | if (!hnae3_dev_fd_supported(hdev)) |
6080 | 6078 | return -EOPNOTSUPP; |
6081 | 6079 | |
6082 | - memset(&new_tuples, 0, sizeof(new_tuples)); | |
6083 | - hclge_fd_get_flow_tuples(fkeys, &new_tuples); | |
6084 | - | |
6085 | - spin_lock_bh(&hdev->fd_rule_lock); | |
6086 | - | |
6087 | 6080 | /* when there is already fd rule existed add by user, |
6088 | 6081 | * arfs should not work |
6089 | 6082 | */ |
6083 | + spin_lock_bh(&hdev->fd_rule_lock); | |
6090 | 6084 | if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { |
6091 | 6085 | spin_unlock_bh(&hdev->fd_rule_lock); |
6092 | 6086 | |
6093 | 6087 | return -EOPNOTSUPP; |
6094 | 6088 | } |
6095 | 6089 | |
6090 | + hclge_fd_get_flow_tuples(fkeys, &new_tuples); | |
6091 | + | |
6096 | 6092 | /* check is there flow director filter existed for this flow, |
6097 | 6093 | * if not, create a new filter for it; |
6098 | 6094 | * if filter exist with different queue id, modify the filter; |
... | ... | @@ -6177,6 +6173,7 @@ |
6177 | 6173 | #endif |
6178 | 6174 | } |
6179 | 6175 | |
6176 | +/* make sure being called after lock up with fd_rule_lock */ | |
6180 | 6177 | static void hclge_clear_arfs_rules(struct hnae3_handle *handle) |
6181 | 6178 | { |
6182 | 6179 | #ifdef CONFIG_RFS_ACCEL |
6183 | 6180 | |
6184 | 6181 | |
... | ... | @@ -6221,10 +6218,14 @@ |
6221 | 6218 | |
6222 | 6219 | hdev->fd_en = enable; |
6223 | 6220 | clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; |
6224 | - if (!enable) | |
6221 | + | |
6222 | + if (!enable) { | |
6223 | + spin_lock_bh(&hdev->fd_rule_lock); | |
6225 | 6224 | hclge_del_all_fd_entries(handle, clear); |
6226 | - else | |
6225 | + spin_unlock_bh(&hdev->fd_rule_lock); | |
6226 | + } else { | |
6227 | 6227 | hclge_restore_fd_entries(handle); |
6228 | + } | |
6228 | 6229 | } |
6229 | 6230 | |
6230 | 6231 | static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) |
6231 | 6232 | |
... | ... | @@ -6678,8 +6679,9 @@ |
6678 | 6679 | int i; |
6679 | 6680 | |
6680 | 6681 | set_bit(HCLGE_STATE_DOWN, &hdev->state); |
6681 | - | |
6682 | + spin_lock_bh(&hdev->fd_rule_lock); | |
6682 | 6683 | hclge_clear_arfs_rules(handle); |
6684 | + spin_unlock_bh(&hdev->fd_rule_lock); | |
6683 | 6685 | |
6684 | 6686 | /* If it is not PF reset, the firmware will disable the MAC, |
6685 | 6687 | * so it only need to stop phy here. |