Commit 43608372b84d7f0142ee8b2f56277a60f0f0a2a5
Committed by
Greg Kroah-Hartman
1 parent
8901896f69
net/mlx5: Verify Hardware supports requested ptp function on a given pin
[ Upstream commit 071995c877a8646209d55ff8edddd2b054e7424c ]

Fix a bug where the driver did not verify hardware pin capabilities for PTP functions.

Fixes: ee7f12205abc ("net/mlx5e: Implement 1PPS support")
Signed-off-by: Eran Ben Elisha <eranbe@mellanox.com>
Reviewed-by: Ariel Levkovich <lariel@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Showing 1 changed file with 22 additions and 1 deletions Inline Diff
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1 | /* | 1 | /* |
2 | * Copyright (c) 2015, Mellanox Technologies. All rights reserved. | 2 | * Copyright (c) 2015, Mellanox Technologies. All rights reserved. |
3 | * | 3 | * |
4 | * This software is available to you under a choice of one of two | 4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * General Public License (GPL) Version 2, available from the file | 6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the | 7 | * COPYING in the main directory of this source tree, or the |
8 | * OpenIB.org BSD license below: | 8 | * OpenIB.org BSD license below: |
9 | * | 9 | * |
10 | * Redistribution and use in source and binary forms, with or | 10 | * Redistribution and use in source and binary forms, with or |
11 | * without modification, are permitted provided that the following | 11 | * without modification, are permitted provided that the following |
12 | * conditions are met: | 12 | * conditions are met: |
13 | * | 13 | * |
14 | * - Redistributions of source code must retain the above | 14 | * - Redistributions of source code must retain the above |
15 | * copyright notice, this list of conditions and the following | 15 | * copyright notice, this list of conditions and the following |
16 | * disclaimer. | 16 | * disclaimer. |
17 | * | 17 | * |
18 | * - Redistributions in binary form must reproduce the above | 18 | * - Redistributions in binary form must reproduce the above |
19 | * copyright notice, this list of conditions and the following | 19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer in the documentation and/or other materials | 20 | * disclaimer in the documentation and/or other materials |
21 | * provided with the distribution. | 21 | * provided with the distribution. |
22 | * | 22 | * |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | 25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | 26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | 27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | 28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. | 30 | * SOFTWARE. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/clocksource.h> | 33 | #include <linux/clocksource.h> |
34 | #include <linux/highmem.h> | 34 | #include <linux/highmem.h> |
35 | #include <rdma/mlx5-abi.h> | 35 | #include <rdma/mlx5-abi.h> |
36 | #include "lib/eq.h" | 36 | #include "lib/eq.h" |
37 | #include "en.h" | 37 | #include "en.h" |
38 | #include "clock.h" | 38 | #include "clock.h" |
39 | 39 | ||
enum {
	/* Fixed-point shift for the cycles-to-ns conversion; presumably
	 * installed into cycles.shift at init (the initialization code is
	 * outside this chunk — TODO confirm).
	 */
	MLX5_CYCLES_SHIFT	= 23
};

/* MTPPS pin_mode values: direction of a programmable PPS pin. */
enum {
	MLX5_PIN_MODE_IN	= 0x0,
	MLX5_PIN_MODE_OUT	= 0x1,
};

/* MTPPS output pattern: a single pulse vs. a free-running periodic signal. */
enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};

/* MTPPSE event-generation modes.  NOTE: the "REPETETIVE" spelling is part
 * of the existing identifier and must not be "fixed".
 */
enum {
	MLX5_EVENT_MODE_DISABLE		= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};

/* field_select bits for MTPPS register writes: which fields of the
 * register the device should actually take from this command.
 */
enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
};
68 | 68 | ||
/* cyclecounter read callback: sample the device's free-running internal
 * timer and mask it to the counter width.
 */
static u64 read_internal_timer(const struct cyclecounter *cc)
{
	struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);

	return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
}
77 | 77 | ||
/* Publish the current timecounter state (cycle_last/mult/nsec/frac) to the
 * shared clock-info page consumed outside the kernel (RDMA/userspace).
 *
 * Writer side of a seqcount-like protocol: the KERNEL_UPDATING bit is set
 * (with a full barrier, smp_store_mb) before the fields are touched, and
 * the final smp_store_release advances 'sign' past the dirty window —
 * readers that sampled an in-progress value presumably retry (reader side
 * is defined by the mlx5 ABI, not visible here).
 *
 * All callers in this file invoke this under write_seqlock of clock->lock.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	u32 sign;

	/* No shared page mapped — nothing to publish. */
	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	clock_info->cycles = clock->tc.cycle_last;
	clock_info->mult = clock->cycles.mult;
	clock_info->nsec = clock->tc.nsec;
	clock_info->frac = clock->tc.frac;

	/* Advance the sequence past the update window in one release store. */
	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
99 | 99 | ||
/* Deferred work: re-arm pending one-shot PPS output events.
 *
 * For every pin with a stashed start timestamp (presumably stored by the
 * PPS event handler — its body is outside this chunk), consume the stash
 * under the clock lock and program the timestamp back into the MTPPS
 * register so the next pulse is scheduled.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		/* Read-and-clear atomically w.r.t. whoever sets start[i]. */
		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}
128 | 128 | ||
/* Periodic work that folds the free-running cycle counter into the
 * timecounter before the hardware counter can wrap, publishes the new
 * state to the shared clock-info page, and re-arms itself with
 * clock->overflow_period.
 */
static void mlx5_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
						overflow_work);
	unsigned long flags;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&clock->tc);
	mlx5_update_clock_info_page(clock->mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);
	schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
142 | 142 | ||
/* ptp_clock_info.settime64: reset the timecounter to the requested wall
 * time and republish the shared clock-info page.  Always returns 0.
 */
static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);
	u64 ns = timespec64_to_ns(ts);
	unsigned long flags;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_init(&clock->tc, &clock->cycles, ns);
	mlx5_update_clock_info_page(clock->mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
158 | 158 | ||
/* ptp_clock_info.gettimex64: sample the device timer (optionally bracketed
 * by system timestamps via @sts) and convert the cycle count to a
 * timespec64 through the timecounter.  The write side of the seqlock is
 * taken because the timecounter state is shared with the adjustment paths.
 */
static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			     struct ptp_system_timestamp *sts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	unsigned long flags;
	u64 cycles, ns;

	write_seqlock_irqsave(&clock->lock, flags);
	cycles = mlx5_read_internal_timer(mdev, sts);
	ns = timecounter_cyc2time(&clock->tc, cycles);
	write_sequnlock_irqrestore(&clock->lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}
178 | 178 | ||
/* ptp_clock_info.adjtime: shift the timecounter by @delta nanoseconds and
 * republish the shared clock-info page.  Always returns 0.
 */
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);
	unsigned long flags;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&clock->tc, delta);
	mlx5_update_clock_info_page(clock->mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
192 | 192 | ||
/* ptp_clock_info.adjfreq: scale the cyclecounter multiplier by @delta
 * parts-per-billion relative to the nominal multiplier:
 *
 *   diff = nominal_c_mult * |delta| / 1e9
 *   mult = nominal_c_mult +/- diff
 *
 * The timecounter is read first so the new rate only applies from now on.
 */
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	u64 adj;
	u32 diff;
	unsigned long flags;
	int neg_adj = 0;
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
						ptp_info);

	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}

	adj = clock->nominal_c_mult;
	adj *= delta;
	diff = div_u64(adj, 1000000000ULL);

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&clock->tc);
	clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
				       clock->nominal_c_mult + diff;
	mlx5_update_clock_info_page(clock->mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
220 | 220 | ||
/* ptp_clock_info.enable(PTP_CLK_REQ_EXTTS) handler: configure a pin to
 * time-stamp an external signal edge, or disable it.
 *
 * Returns 0 on success, -EOPNOTSUPP for missing HW support or unsupported
 * flags, -EINVAL for an out-of-range pin index, -EBUSY if the pin is not
 * currently assigned to EXTTS, or the MTPPS/MTPPSE access error.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	if (on) {
		/* The pin must already be assigned to EXTTS in pin_config. */
		pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
		if (pin < 0)
			return -EBUSY;
		pin_mode = MLX5_PIN_MODE_IN;
		/* pattern encodes the requested edge: nonzero when
		 * PTP_FALLING_EDGE was asked for.
		 */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		pin = rq->extts.index;
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* Arm/disarm event generation.  The bitwise AND relies on @on being
	 * 0 or 1 (presumably guaranteed by the PTP core — TODO confirm).
	 */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
282 | 282 | ||
/* ptp_clock_info.enable(PTP_CLK_REQ_PEROUT) handler: program a periodic
 * output signal on a pin, or disable it.
 *
 * Only a 1 Hz period is supported (see the check on ns below).  The
 * requested start time is translated into a device cycle count and written
 * to the MTPPS time_stamp field.
 *
 * Returns 0 on success, -EOPNOTSUPP for missing HW support or any flags,
 * -EINVAL for a bad pin index or period, -EBUSY if the pin is not assigned
 * to PEROUT, or the MTPPS/MTPPSE access error.
 */
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
				 struct ptp_clock_request *rq,
				 int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u64 nsec_now, nsec_delta, time_stamp = 0;
	u64 cycles_now, cycles_delta;
	struct timespec64 ts;
	unsigned long flags;
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;
	s64 ns;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->perout.flags)
		return -EOPNOTSUPP;

	if (rq->perout.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	if (on) {
		pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
				   rq->perout.index);
		if (pin < 0)
			return -EBUSY;

		pin_mode = MLX5_PIN_MODE_OUT;
		pattern = MLX5_OUT_PATTERN_PERIODIC;
		ts.tv_sec = rq->perout.period.sec;
		ts.tv_nsec = rq->perout.period.nsec;
		ns = timespec64_to_ns(&ts);

		/* Only a 1 s period is supported.  NOTE(review): the >> 1
		 * makes this accept both 1000000000 and 1000000001 ns.
		 */
		if ((ns >> 1) != 500000000LL)
			return -EINVAL;

		/* Convert the requested start time into an absolute device
		 * cycle count: delta_ns * 2^shift / mult is the inverse of
		 * the cyclecounter ns conversion.
		 */
		ts.tv_sec = rq->perout.start.sec;
		ts.tv_nsec = rq->perout.start.nsec;
		ns = timespec64_to_ns(&ts);
		cycles_now = mlx5_read_internal_timer(mdev, NULL);
		write_seqlock_irqsave(&clock->lock, flags);
		nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
		nsec_delta = ns - nsec_now;
		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
					 clock->cycles.mult);
		write_sequnlock_irqrestore(&clock->lock, flags);
		time_stamp = cycles_now + cycles_delta;
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE |
			       MLX5_MTPPS_FS_TIME_STAMP;
	} else {
		pin = rq->perout.index;
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* Arm/disarm event generation (relies on @on being 0 or 1). */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
362 | 362 | ||
/* ptp_clock_info.enable(PTP_CLK_REQ_PPS) handler: record whether PPS event
 * reporting is enabled.  No hardware access — the flag is presumably
 * consulted by the PPS event path (outside this chunk).
 */
static int mlx5_pps_configure(struct ptp_clock_info *ptp,
			      struct ptp_clock_request *rq,
			      int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);

	clock->pps_info.enabled = !!on;
	return 0;
}
373 | 373 | ||
374 | static int mlx5_ptp_enable(struct ptp_clock_info *ptp, | 374 | static int mlx5_ptp_enable(struct ptp_clock_info *ptp, |
375 | struct ptp_clock_request *rq, | 375 | struct ptp_clock_request *rq, |
376 | int on) | 376 | int on) |
377 | { | 377 | { |
378 | switch (rq->type) { | 378 | switch (rq->type) { |
379 | case PTP_CLK_REQ_EXTTS: | 379 | case PTP_CLK_REQ_EXTTS: |
380 | return mlx5_extts_configure(ptp, rq, on); | 380 | return mlx5_extts_configure(ptp, rq, on); |
381 | case PTP_CLK_REQ_PEROUT: | 381 | case PTP_CLK_REQ_PEROUT: |
382 | return mlx5_perout_configure(ptp, rq, on); | 382 | return mlx5_perout_configure(ptp, rq, on); |
383 | case PTP_CLK_REQ_PPS: | 383 | case PTP_CLK_REQ_PPS: |
384 | return mlx5_pps_configure(ptp, rq, on); | 384 | return mlx5_pps_configure(ptp, rq, on); |
385 | default: | 385 | default: |
386 | return -EOPNOTSUPP; | 386 | return -EOPNOTSUPP; |
387 | } | 387 | } |
388 | return 0; | 388 | return 0; |
389 | } | 389 | } |
390 | 390 | ||
391 | enum { | ||
392 | MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0), | ||
393 | MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1), | ||
394 | }; | ||
395 | |||
391 | static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, | 396 | static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, |
392 | enum ptp_pin_function func, unsigned int chan) | 397 | enum ptp_pin_function func, unsigned int chan) |
393 | { | 398 | { |
394 | return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0; | 399 | struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, |
400 | ptp_info); | ||
401 | |||
402 | switch (func) { | ||
403 | case PTP_PF_NONE: | ||
404 | return 0; | ||
405 | case PTP_PF_EXTTS: | ||
406 | return !(clock->pps_info.pin_caps[pin] & | ||
407 | MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN); | ||
408 | case PTP_PF_PEROUT: | ||
409 | return !(clock->pps_info.pin_caps[pin] & | ||
410 | MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT); | ||
411 | default: | ||
412 | return -EOPNOTSUPP; | ||
413 | } | ||
414 | |||
415 | return -EOPNOTSUPP; | ||
395 | } | 416 | } |
396 | 417 | ||
/* Template ptp_clock_info copied into each device's clock at init.  The
 * pin/event counts and the enable/verify callbacks start as zero/NULL and
 * are filled in at runtime from MTPPS capabilities (see
 * mlx5_get_pps_caps() and mlx5_init_pin_config()).
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_p2p",
	.max_adj	= 100000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= mlx5_ptp_adjfreq,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};
413 | 434 | ||
/* Allocate and initialize the pin_config table exposed to the PTP core:
 * every pin starts unassigned (PTP_PF_NONE) with chan == index and a
 * "mlx5_pps<N>" name.  Also hooks up the enable/verify callbacks and
 * advertises PPS support.
 *
 * Returns 0 on success or -ENOMEM.  Requires ptp_info.n_pins to have been
 * set first (see mlx5_get_pps_caps()).
 */
static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
	int i;

	clock->ptp_info.pin_config =
			kcalloc(clock->ptp_info.n_pins,
				sizeof(*clock->ptp_info.pin_config),
				GFP_KERNEL);
	if (!clock->ptp_info.pin_config)
		return -ENOMEM;
	clock->ptp_info.enable = mlx5_ptp_enable;
	clock->ptp_info.verify = mlx5_ptp_verify;
	clock->ptp_info.pps = 1;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		snprintf(clock->ptp_info.pin_config[i].name,
			 sizeof(clock->ptp_info.pin_config[i].name),
			 "mlx5_pps%d", i);
		clock->ptp_info.pin_config[i].index = i;
		clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
		clock->ptp_info.pin_config[i].chan = i;
	}

	return 0;
}
439 | 460 | ||
/* Query the MTPPS register and cache the PPS capabilities: total pin
 * count, max in/out pin counts, and the per-pin mode capability bits later
 * consulted by mlx5_ptp_verify().
 *
 * NOTE(review): the return value of mlx5_query_mtpps() is ignored; on
 * failure the zero-initialized buffer presumably leaves all caps at 0 —
 * confirm that is the intended fail-safe behavior.
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	/* pin_caps[] is a fixed 8-slot array matching cap_pin_0..7_mode. */
	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
463 | 484 | ||
/* EQ notifier callback for hardware PPS events.
 *
 * For a pin configured as PTP_PF_EXTTS, forward the HW timestamp to the
 * PTP core (as a PPSUSR event when PPS is enabled, otherwise as EXTTS).
 * For PTP_PF_PEROUT, compute the cycle count of the next whole-second
 * boundary and queue out_work to re-arm the output pulse in hardware.
 *
 * Runs in notifier context; returns NOTIFY_OK in all cases.
 *
 * NOTE(review): 'pin' comes straight from the EQE and indexes
 * pin_config without a bounds check — assumes FW only raises events for
 * pins the driver configured; verify against the EQ dispatch path.
 */
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct mlx5_core_dev *mdev = clock->mdev;
	struct ptp_clock_event ptp_event;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta, ns;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	struct timespec64 ts;
	unsigned long flags;

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		/* Convert the raw HW cycle stamp to nanoseconds. */
		ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
					be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register() fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		/* Round the current time up to the next full second and
		 * translate that instant back into device cycles; the
		 * seqlock protects the timecounter state while converting.
		 */
		mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
		cycles_now = mlx5_read_internal_timer(mdev, NULL);
		ts.tv_sec += 1;
		ts.tv_nsec = 0;
		ns = timespec64_to_ns(&ts);
		write_seqlock_irqsave(&clock->lock, flags);
		nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
		nsec_delta = ns - nsec_now;
		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
					 clock->cycles.mult);
		clock->pps_info.start[pin] = cycles_now + cycles_delta;
		/* out_work programs the HW with the new start time. */
		schedule_work(&clock->pps_info.out_work);
		write_sequnlock_irqrestore(&clock->lock, flags);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}
514 | 535 | ||
515 | void mlx5_init_clock(struct mlx5_core_dev *mdev) | 536 | void mlx5_init_clock(struct mlx5_core_dev *mdev) |
516 | { | 537 | { |
517 | struct mlx5_clock *clock = &mdev->clock; | 538 | struct mlx5_clock *clock = &mdev->clock; |
518 | u64 overflow_cycles; | 539 | u64 overflow_cycles; |
519 | u64 ns; | 540 | u64 ns; |
520 | u64 frac = 0; | 541 | u64 frac = 0; |
521 | u32 dev_freq; | 542 | u32 dev_freq; |
522 | 543 | ||
523 | dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz); | 544 | dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz); |
524 | if (!dev_freq) { | 545 | if (!dev_freq) { |
525 | mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); | 546 | mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); |
526 | return; | 547 | return; |
527 | } | 548 | } |
528 | seqlock_init(&clock->lock); | 549 | seqlock_init(&clock->lock); |
529 | clock->cycles.read = read_internal_timer; | 550 | clock->cycles.read = read_internal_timer; |
530 | clock->cycles.shift = MLX5_CYCLES_SHIFT; | 551 | clock->cycles.shift = MLX5_CYCLES_SHIFT; |
531 | clock->cycles.mult = clocksource_khz2mult(dev_freq, | 552 | clock->cycles.mult = clocksource_khz2mult(dev_freq, |
532 | clock->cycles.shift); | 553 | clock->cycles.shift); |
533 | clock->nominal_c_mult = clock->cycles.mult; | 554 | clock->nominal_c_mult = clock->cycles.mult; |
534 | clock->cycles.mask = CLOCKSOURCE_MASK(41); | 555 | clock->cycles.mask = CLOCKSOURCE_MASK(41); |
535 | clock->mdev = mdev; | 556 | clock->mdev = mdev; |
536 | 557 | ||
537 | timecounter_init(&clock->tc, &clock->cycles, | 558 | timecounter_init(&clock->tc, &clock->cycles, |
538 | ktime_to_ns(ktime_get_real())); | 559 | ktime_to_ns(ktime_get_real())); |
539 | 560 | ||
540 | /* Calculate period in seconds to call the overflow watchdog - to make | 561 | /* Calculate period in seconds to call the overflow watchdog - to make |
541 | * sure counter is checked at least twice every wrap around. | 562 | * sure counter is checked at least twice every wrap around. |
542 | * The period is calculated as the minimum between max HW cycles count | 563 | * The period is calculated as the minimum between max HW cycles count |
543 | * (The clock source mask) and max amount of cycles that can be | 564 | * (The clock source mask) and max amount of cycles that can be |
544 | * multiplied by clock multiplier where the result doesn't exceed | 565 | * multiplied by clock multiplier where the result doesn't exceed |
545 | * 64bits. | 566 | * 64bits. |
546 | */ | 567 | */ |
547 | overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult); | 568 | overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult); |
548 | overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3)); | 569 | overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3)); |
549 | 570 | ||
550 | ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, | 571 | ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, |
551 | frac, &frac); | 572 | frac, &frac); |
552 | do_div(ns, NSEC_PER_SEC / HZ); | 573 | do_div(ns, NSEC_PER_SEC / HZ); |
553 | clock->overflow_period = ns; | 574 | clock->overflow_period = ns; |
554 | 575 | ||
555 | mdev->clock_info = | 576 | mdev->clock_info = |
556 | (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL); | 577 | (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL); |
557 | if (mdev->clock_info) { | 578 | if (mdev->clock_info) { |
558 | mdev->clock_info->nsec = clock->tc.nsec; | 579 | mdev->clock_info->nsec = clock->tc.nsec; |
559 | mdev->clock_info->cycles = clock->tc.cycle_last; | 580 | mdev->clock_info->cycles = clock->tc.cycle_last; |
560 | mdev->clock_info->mask = clock->cycles.mask; | 581 | mdev->clock_info->mask = clock->cycles.mask; |
561 | mdev->clock_info->mult = clock->nominal_c_mult; | 582 | mdev->clock_info->mult = clock->nominal_c_mult; |
562 | mdev->clock_info->shift = clock->cycles.shift; | 583 | mdev->clock_info->shift = clock->cycles.shift; |
563 | mdev->clock_info->frac = clock->tc.frac; | 584 | mdev->clock_info->frac = clock->tc.frac; |
564 | mdev->clock_info->overflow_period = clock->overflow_period; | 585 | mdev->clock_info->overflow_period = clock->overflow_period; |
565 | } | 586 | } |
566 | 587 | ||
567 | INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out); | 588 | INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out); |
568 | INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow); | 589 | INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow); |
569 | if (clock->overflow_period) | 590 | if (clock->overflow_period) |
570 | schedule_delayed_work(&clock->overflow_work, 0); | 591 | schedule_delayed_work(&clock->overflow_work, 0); |
571 | else | 592 | else |
572 | mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n"); | 593 | mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n"); |
573 | 594 | ||
574 | /* Configure the PHC */ | 595 | /* Configure the PHC */ |
575 | clock->ptp_info = mlx5_ptp_clock_info; | 596 | clock->ptp_info = mlx5_ptp_clock_info; |
576 | 597 | ||
577 | /* Initialize 1PPS data structures */ | 598 | /* Initialize 1PPS data structures */ |
578 | if (MLX5_PPS_CAP(mdev)) | 599 | if (MLX5_PPS_CAP(mdev)) |
579 | mlx5_get_pps_caps(mdev); | 600 | mlx5_get_pps_caps(mdev); |
580 | if (clock->ptp_info.n_pins) | 601 | if (clock->ptp_info.n_pins) |
581 | mlx5_init_pin_config(clock); | 602 | mlx5_init_pin_config(clock); |
582 | 603 | ||
583 | clock->ptp = ptp_clock_register(&clock->ptp_info, | 604 | clock->ptp = ptp_clock_register(&clock->ptp_info, |
584 | &mdev->pdev->dev); | 605 | &mdev->pdev->dev); |
585 | if (IS_ERR(clock->ptp)) { | 606 | if (IS_ERR(clock->ptp)) { |
586 | mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n", | 607 | mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n", |
587 | PTR_ERR(clock->ptp)); | 608 | PTR_ERR(clock->ptp)); |
588 | clock->ptp = NULL; | 609 | clock->ptp = NULL; |
589 | } | 610 | } |
590 | 611 | ||
591 | MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT); | 612 | MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT); |
592 | mlx5_eq_notifier_register(mdev, &clock->pps_nb); | 613 | mlx5_eq_notifier_register(mdev, &clock->pps_nb); |
593 | } | 614 | } |
594 | 615 | ||
595 | void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) | 616 | void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) |
596 | { | 617 | { |
597 | struct mlx5_clock *clock = &mdev->clock; | 618 | struct mlx5_clock *clock = &mdev->clock; |
598 | 619 | ||
599 | if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) | 620 | if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) |
600 | return; | 621 | return; |
601 | 622 | ||
602 | mlx5_eq_notifier_unregister(mdev, &clock->pps_nb); | 623 | mlx5_eq_notifier_unregister(mdev, &clock->pps_nb); |
603 | if (clock->ptp) { | 624 | if (clock->ptp) { |
604 | ptp_clock_unregister(clock->ptp); | 625 | ptp_clock_unregister(clock->ptp); |
605 | clock->ptp = NULL; | 626 | clock->ptp = NULL; |
606 | } | 627 | } |
607 | 628 | ||
608 | cancel_work_sync(&clock->pps_info.out_work); | 629 | cancel_work_sync(&clock->pps_info.out_work); |
609 | cancel_delayed_work_sync(&clock->overflow_work); | 630 | cancel_delayed_work_sync(&clock->overflow_work); |
610 | 631 | ||
611 | if (mdev->clock_info) { | 632 | if (mdev->clock_info) { |
612 | free_page((unsigned long)mdev->clock_info); | 633 | free_page((unsigned long)mdev->clock_info); |
613 | mdev->clock_info = NULL; | 634 | mdev->clock_info = NULL; |
614 | } | 635 | } |
615 | 636 | ||
616 | kfree(clock->ptp_info.pin_config); | 637 | kfree(clock->ptp_info.pin_config); |
617 | } | 638 | } |
618 | 639 |