// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */
  
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"


#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - thunderbolt control channel
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];

	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	event_cb callback;
	void *callback_data;
};
  
  
  #define tb_ctl_WARN(ctl, format, arg...) \
  	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
  
  #define tb_ctl_err(ctl, format, arg...) \
  	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
  
  #define tb_ctl_warn(ctl, format, arg...) \
  	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)
  
  #define tb_ctl_info(ctl, format, arg...) \
  	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
  #define tb_ctl_dbg(ctl, format, arg...) \
  	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
  static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
  /* Serializes access to request kref_get/put */
  static DEFINE_MUTEX(tb_cfg_request_lock);
  
  /**
   * tb_cfg_request_alloc() - Allocates a new config request
   *
 * This is a refcounted object, so when you are done with it call
 * tb_cfg_request_put() to release it.
   */
  struct tb_cfg_request *tb_cfg_request_alloc(void)
  {
  	struct tb_cfg_request *req;
  
  	req = kzalloc(sizeof(*req), GFP_KERNEL);
  	if (!req)
  		return NULL;
  
  	kref_init(&req->kref);
  
  	return req;
  }
  
  /**
   * tb_cfg_request_get() - Increase refcount of a request
   * @req: Request whose refcount is increased
   */
  void tb_cfg_request_get(struct tb_cfg_request *req)
  {
  	mutex_lock(&tb_cfg_request_lock);
  	kref_get(&req->kref);
  	mutex_unlock(&tb_cfg_request_lock);
  }
  
  static void tb_cfg_request_destroy(struct kref *kref)
  {
  	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);
  
  	kfree(req);
  }
  
  /**
   * tb_cfg_request_put() - Decrease refcount and possibly release the request
   * @req: Request whose refcount is decreased
   *
   * Call this function when you are done with the request. When refcount
   * goes to %0 the object is released.
   */
  void tb_cfg_request_put(struct tb_cfg_request *req)
  {
  	mutex_lock(&tb_cfg_request_lock);
  	kref_put(&req->kref, tb_cfg_request_destroy);
  	mutex_unlock(&tb_cfg_request_lock);
  }
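
/*
 * A minimal sketch of the request life cycle, as used by the
 * synchronous helpers below (see tb_cfg_reset() for a full example):
 *
 *	struct tb_cfg_request *req = tb_cfg_request_alloc();
 *	if (!req)
 *		return -ENOMEM;
 *	req->match = tb_cfg_match;
 *	req->copy = tb_cfg_copy;
 *	(fill in req->request, req->response and the packet types)
 *	res = tb_cfg_request_sync(ctl, req, timeout_msec);
 *	tb_cfg_request_put(req);
 */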
  
  static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
  				  struct tb_cfg_request *req)
  {
  	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
  	WARN_ON(req->ctl);
  
  	mutex_lock(&ctl->request_queue_lock);
  	if (!ctl->running) {
  		mutex_unlock(&ctl->request_queue_lock);
  		return -ENOTCONN;
  	}
  	req->ctl = ctl;
  	list_add_tail(&req->list, &ctl->request_queue);
  	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
  	mutex_unlock(&ctl->request_queue_lock);
  	return 0;
  }
  
  static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
  {
  	struct tb_ctl *ctl = req->ctl;
  
  	mutex_lock(&ctl->request_queue_lock);
  	list_del(&req->list);
  	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
  	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
  		wake_up(&tb_cfg_request_cancel_queue);
  	mutex_unlock(&ctl->request_queue_lock);
  }
  
  static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
  {
  	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
  }
  
  static struct tb_cfg_request *
  tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
  {
  	struct tb_cfg_request *req;
  	bool found = false;
  
  	mutex_lock(&pkg->ctl->request_queue_lock);
  	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
  		tb_cfg_request_get(req);
  		if (req->match(req, pkg)) {
  			found = true;
  			break;
  		}
  		tb_cfg_request_put(req);
  	}
  	mutex_unlock(&pkg->ctl->request_queue_lock);
  
  	return found ? req : NULL;
  }
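
/*
 * Note that tb_cfg_request_find() returns the matching request with its
 * refcount already incremented, so the caller (tb_ctl_rx_callback())
 * must balance it with tb_cfg_request_put().
 */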

/* utility functions */

  static int check_header(const struct ctl_pkg *pkg, u32 len,
  			enum tb_cfg_pkg_type type, u64 route)
  {
  	struct tb_cfg_header *header = pkg->buffer;
  
  	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
			"wrong framesize (expected %#x, got %#x)\n",
			len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
			type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
			pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
			"header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
			"wrong route (expected %llx, got %llx)\n",
			route, tb_cfg_get_route(header)))
		return -EIO;
  	return 0;
  }
  
  static int check_config_address(struct tb_cfg_address addr,
  				enum tb_cfg_space space, u32 offset,
  				u32 length)
  {
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
			space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
			offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
			length, addr.length))
		return -EIO;
  	/*
  	 * We cannot check addr->port as it is set to the upstream port of the
  	 * sender.
  	 */
  	return 0;
  }
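
/*
 * Turns a TB_CFG_PKG_ERROR packet into a struct tb_cfg_result with
 * res.err set to 1 and the Thunderbolt error code in res.tb_error.
 */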
static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_ctl *ctl = response->ctl;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	if (pkg->zero1)
		tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
	if (pkg->zero2)
		tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
	if (pkg->zero3)
		tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
  	res.err = check_header(pkg, len, type, route);
  	return res;
  }
  
  static void tb_cfg_print_error(struct tb_ctl *ctl,
  			       const struct tb_cfg_result *res)
  {
  	WARN_ON(res->err != 1);
  	switch (res->tb_error) {
  	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/*
		 * Port is not connected. This can happen during surprise
		 * removal. Do not warn.
		 */
  		return;
  	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
  		/*
  		 * Invalid cfg_space/offset/length combination in
  		 * cfg_read/cfg_write.
  		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			res->response_route, res->response_port);
		return;
	}
}
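
/*
 * CRC-32C over the big-endian payload, with the usual initial and final
 * inversion; the checksum is returned in wire (big-endian) order.
 */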
static __be32 tb_crc(const void *data, size_t len)
  {
  	return cpu_to_be32(~__crc32c_le(~0, data, len));
  }
  
  static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
  {
  	if (pkg) {
  		dma_pool_free(pkg->ctl->frame_pool,
  			      pkg->buffer, pkg->frame.buffer_phy);
  		kfree(pkg);
  	}
  }
  
  static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
  {
  	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
  	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
  	}
  	return pkg;
  }
  
  
  /* RX/TX handling */
  
  static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
  			       bool canceled)
  {
  	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
  	tb_ctl_pkg_free(pkg);
  }
  
  /**
 * tb_ctl_tx() - transmit a packet on the control channel
   *
   * len must be a multiple of four.
   *
   * Return: Returns 0 on success or an error code on failure.
   */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
  		     enum tb_cfg_pkg_type type)
  {
  	int res;
  	struct ctl_pkg *pkg;
  	if (len % 4 != 0) { /* required for le->be conversion */
  		tb_ctl_WARN(ctl, "TX: invalid size: %zu
  ", len);
  		return -EINVAL;
  	}
  	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
  		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d
  ",
  			    len, TB_FRAME_SIZE - 4);
  		return -EINVAL;
  	}
  	pkg = tb_ctl_pkg_alloc(ctl);
  	if (!pkg)
  		return -ENOMEM;
  	pkg->frame.callback = tb_ctl_tx_callback;
  	pkg->frame.size = len + 4;
  	pkg->frame.sof = type;
  	pkg->frame.eof = type;
  	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

  	res = tb_ring_tx(ctl->tx, &pkg->frame);
  	if (res) /* ring is stopped */
  		tb_ctl_pkg_free(pkg);
  	return res;
  }
  
  /**
 * tb_ctl_handle_event() - pass an event packet to ctl->callback
   */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
  }
  
  static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
  {
	tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
  					     * We ignore failures during stop.
  					     * All rx packets are referenced
  					     * from ctl->rx_packets, so we do
					     * not lose them.
  					     */
  }
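
/*
 * Returns true if the packet is a spontaneous link error notification
 * that should be handled as an event instead of being matched to an
 * outstanding request.
 */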
  static int tb_async_error(const struct ctl_pkg *pkg)
  {
	const struct cfg_error_pkg *error = pkg->buffer;
  
  	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
  		return false;
  
  	switch (error->error) {
  	case TB_CFG_ERROR_LINK_ERROR:
  	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
  	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
  		return true;
  
  	default:
  		return false;
  	}
  }

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;
  
  	if (canceled)
  		return; /*
  			 * ring is stopped, packet is referenced from
  			 * ctl->rx_packets.
  			 */
  
  	if (frame->size < 4 || frame->size % 4 != 0) {
  		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet
  ",
  			   frame->size);
  		goto rx;
  	}
  
  	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

  	switch (frame->eof) {
  	case TB_CFG_PKG_READ:
  	case TB_CFG_PKG_WRITE:
  	case TB_CFG_PKG_ERROR:
  	case TB_CFG_PKG_OVERRIDE:
  	case TB_CFG_PKG_RESET:
  		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
  			tb_ctl_err(pkg->ctl,
  				   "RX: checksum mismatch, dropping packet
  ");
  			goto rx;
  		}
  		if (tb_async_error(pkg)) {
  			tb_ctl_handle_event(pkg->ctl, frame->eof,
  					    pkg, frame->size);
  			goto rx;
  		}
  		break;
  
  	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
  		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
  			tb_ctl_err(pkg->ctl,
  				   "RX: checksum mismatch, dropping packet
  ");
  			goto rx;
  		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
  		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
  			goto rx;
  		break;

	default:
		break;
  	}
  	/*
  	 * The received packet will be processed only if there is an
  	 * active request and that the packet is what is expected. This
  	 * prevents packets such as replies coming after timeout has
  	 * triggered from messing with the active requests.
  	 */
  	req = tb_cfg_request_find(pkg->ctl, pkg);
  	if (req) {
  		if (req->copy(req, pkg))
  			schedule_work(&req->work);
  		tb_cfg_request_put(req);
	}

  rx:
  	tb_ctl_rx_submit(pkg);
  }
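/*
 * Completion work for a request: runs the callback unless the request
 * was canceled, then dequeues the request and drops the reference taken
 * in tb_cfg_request().
 */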
  static void tb_cfg_request_work(struct work_struct *work)
  {
  	struct tb_cfg_request *req = container_of(work, typeof(*req), work);
  
  	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
  		req->callback(req->callback_data);
  
  	tb_cfg_request_dequeue(req);
  	tb_cfg_request_put(req);
  }

/**
 * tb_cfg_request() - Start control request without waiting for completion
   * @ctl: Control channel to use
   * @req: Request to start
   * @callback: Callback called when the request is completed
   * @callback_data: Data to be passed to @callback
   *
   * This queues @req on the given control channel without waiting for it
   * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

  	req->flags = 0;
  	req->callback = callback;
  	req->callback_data = callback_data;
  	INIT_WORK(&req->work, tb_cfg_request_work);
  	INIT_LIST_HEAD(&req->list);
  	tb_cfg_request_get(req);
  	ret = tb_cfg_request_enqueue(ctl, req);
  	if (ret)
  		goto err_put;
  
  	ret = tb_ctl_tx(ctl, req->request, req->request_size,
  			req->request_type);
  	if (ret)
  		goto err_dequeue;
  
  	if (!req->response)
  		schedule_work(&req->work);
  
  	return 0;
  
  err_dequeue:
  	tb_cfg_request_dequeue(req);
  err_put:
  	tb_cfg_request_put(req);
  
  	return ret;
  }
  
  /**
   * tb_cfg_request_cancel() - Cancel a control request
   * @req: Request to cancel
   * @err: Error to assign to the request
   *
 * This function can be used to cancel an ongoing request. It will wait
   * until the request is not active anymore.
   */
  void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
  {
  	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
  	schedule_work(&req->work);
  	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
  	req->result.err = err;
  }
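/* Completion callback used by tb_cfg_request_sync() below */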
  static void tb_cfg_request_complete(void *data)
  {
  	complete(data);
  }
  
  /**
   * tb_cfg_request_sync() - Start control request and wait until it completes
   * @ctl: Control channel to use
   * @req: Request to start
 * @timeout_msec: Timeout in ms how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers, the request is canceled before the function returns. Note
 * that the caller needs to make sure only one message for a given switch
 * is active at a time.
   */
  struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
  					 struct tb_cfg_request *req,
  					 int timeout_msec)
  {
  	unsigned long timeout = msecs_to_jiffies(timeout_msec);
  	struct tb_cfg_result res = { 0 };
  	DECLARE_COMPLETION_ONSTACK(done);
  	int ret;
  
  	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
  	if (ret) {
  		res.err = ret;
  		return res;
  	}
  
  	if (!wait_for_completion_timeout(&done, timeout))
  		tb_cfg_request_cancel(req, -ETIMEDOUT);
  
  	flush_work(&req->work);
  
  	return req->result;
  }
  
  /* public interface, alloc/start/stop/free */
  
  /**
   * tb_ctl_alloc() - allocate a control channel
   *
   * cb will be invoked once for every hot plug event.
   *
   * Return: Returns a pointer on success or NULL on failure.
   */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
  {
  	int i;
  	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
  	if (!ctl)
  		return NULL;
  	ctl->nhi = nhi;
  	ctl->callback = cb;
  	ctl->callback_data = cb_data;
	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
  	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
  					 TB_FRAME_SIZE, 4, 0);
  	if (!ctl->frame_pool)
  		goto err;
	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
  	if (!ctl->tx)
  		goto err;
	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
  				0xffff, NULL, NULL);
  	if (!ctl->rx)
  		goto err;
  
  	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
  		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
  		if (!ctl->rx_packets[i])
  			goto err;
  		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
  	}

	tb_ctl_dbg(ctl, "control channel created\n");
  	return ctl;
  err:
  	tb_ctl_free(ctl);
  	return NULL;
  }
  
  /**
   * tb_ctl_free() - free a control channel
   *
   * Must be called after tb_ctl_stop.
   *
   * Must NOT be called from ctl->callback.
   */
  void tb_ctl_free(struct tb_ctl *ctl)
  {
  	int i;
  
  	if (!ctl)
  		return;
	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);
  
  	/* free RX packets */
  	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
  		tb_ctl_pkg_free(ctl->rx_packets[i]);
	dma_pool_destroy(ctl->frame_pool);
  	kfree(ctl);
  }
  
  /**
 * tb_ctl_start() - start/resume the control channel
   */
  void tb_ctl_start(struct tb_ctl *ctl)
  {
  	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
  	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
  		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
  }
  
  /**
 * tb_ctl_stop() - pause the control channel
   *
   * All invocations of ctl->callback will have finished after this method
   * returns.
   *
   * Must NOT be called from ctl->callback.
   */
  void tb_ctl_stop(struct tb_ctl *ctl)
  {
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);
  	tb_ring_stop(ctl->rx);
  	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
  	INIT_LIST_HEAD(&ctl->request_queue);

	tb_ctl_dbg(ctl, "control channel stopped\n");
  }
  
  /* public interface, commands */
  
  /**
   * tb_cfg_ack_plug() - Ack hot plug/unplug event
   * @ctl: Control channel to use
   * @route: Router that originated the event
   * @port: Port where the hot plug/unplug happened
   * @unplug: Ack hot plug or unplug
 *
 * Call this as a response to a hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
  {
  	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
  	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
		   unplug ? "un" : "", route, port);
  	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
  }
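/*
 * Default request matcher: an error packet matches any active request;
 * other replies must match the expected response type, route, size and,
 * for reads/writes, the sequence number of the request.
 */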
  static bool tb_cfg_match(const struct tb_cfg_request *req,
  			 const struct ctl_pkg *pkg)
  {
  	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
  
  	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
  		return true;
  
  	if (pkg->frame.eof != req->response_type)
  		return false;
  	if (route != tb_cfg_get_route(req->request))
  		return false;
  	if (pkg->frame.size != req->response_size)
  		return false;
  
  	if (pkg->frame.eof == TB_CFG_PKG_READ ||
  	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
  		const struct cfg_read_pkg *req_hdr = req->request;
  		const struct cfg_read_pkg *res_hdr = pkg->buffer;
  
  		if (req_hdr->addr.seq != res_hdr->addr.seq)
  			return false;
  	}
  
  	return true;
  }
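
/*
 * Default copy callback: validates the reply header with parse_header()
 * and, on success, copies the payload into req->response. Returning
 * true completes the request on the first matching reply.
 */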
  
  static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
  {
  	struct tb_cfg_result res;
  
  	/* Now make sure it is in expected format */
  	res = parse_header(pkg, req->response_size, req->response_type,
  			   tb_cfg_get_route(req->request));
  	if (!res.err)
  		memcpy(req->response, pkg->buffer, req->response_size);
  
  	req->result = res;
  
  	/* Always complete when first response is received */
  	return true;
  }
  /**
   * tb_cfg_reset() - send a reset packet and wait for a response
   *
   * If the switch at route is incorrectly configured then we will not receive a
   * reply (even though the switch will reset). The caller should check for
   * -ETIMEDOUT and attempt to reconfigure the switch.
   */
  struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
  				  int timeout_msec)
  {
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
  	struct tb_cfg_request *req;
  
  	req = tb_cfg_request_alloc();
  	if (!req) {
  		res.err = -ENOMEM;
  		return res;
  	}
  
  	req->match = tb_cfg_match;
  	req->copy = tb_cfg_copy;
  	req->request = &request;
  	req->request_size = sizeof(request);
  	req->request_type = TB_CFG_PKG_RESET;
  	req->response = &reply;
  	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;
  
  	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

  	return res;
  }
  
  /**
 * tb_cfg_read_raw() - read from config space into buffer
   *
   * Offset and length are in dwords.
   */
  struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
  		u64 route, u32 port, enum tb_cfg_space space,
  		u32 offset, u32 length, int timeout_msec)
  {
  	struct tb_cfg_result res = { 0 };
  	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
  		.addr = {
  			.port = port,
  			.space = space,
  			.offset = offset,
  			.length = length,
  		},
  	};
  	struct cfg_write_pkg reply;
	int retries = 0;

  	while (retries < TB_CTL_RETRIES) {
  		struct tb_cfg_request *req;
  
  		req = tb_cfg_request_alloc();
  		if (!req) {
  			res.err = -ENOMEM;
  			return res;
  		}
  
  		request.addr.seq = retries++;
  
  		req->match = tb_cfg_match;
  		req->copy = tb_cfg_copy;
  		req->request = &request;
  		req->request_size = sizeof(request);
  		req->request_type = TB_CFG_PKG_READ;
  		req->response = &reply;
  		req->response_size = 12 + 4 * length;
  		req->response_type = TB_CFG_PKG_READ;
  
  		res = tb_cfg_request_sync(ctl, req, timeout_msec);
  
  		tb_cfg_request_put(req);
  
  		if (res.err != -ETIMEDOUT)
  			break;
  
  		/* Wait a bit (arbitrary time) until we send a retry */
  		usleep_range(10, 100);
  	}
  	if (res.err)
  		return res;
  
  	res.response_port = reply.addr.port;
  	res.err = check_config_address(reply.addr, space, offset, length);
  	if (!res.err)
  		memcpy(buffer, &reply.data, 4 * length);
  	return res;
  }
  
  /**
 * tb_cfg_write_raw() - write from buffer into config space
   *
   * Offset and length are in dwords.
   */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
  		u64 route, u32 port, enum tb_cfg_space space,
  		u32 offset, u32 length, int timeout_msec)
  {
  	struct tb_cfg_result res = { 0 };
  	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
  		.addr = {
  			.port = port,
  			.space = space,
  			.offset = offset,
  			.length = length,
  		},
  	};
  	struct cfg_read_pkg reply;
	int retries = 0;
  
  	memcpy(&request.data, buffer, length * 4);
  	while (retries < TB_CTL_RETRIES) {
  		struct tb_cfg_request *req;
  
  		req = tb_cfg_request_alloc();
  		if (!req) {
  			res.err = -ENOMEM;
  			return res;
  		}
  
  		request.addr.seq = retries++;
  
  		req->match = tb_cfg_match;
  		req->copy = tb_cfg_copy;
  		req->request = &request;
  		req->request_size = 12 + 4 * length;
  		req->request_type = TB_CFG_PKG_WRITE;
  		req->response = &reply;
  		req->response_size = sizeof(reply);
  		req->response_type = TB_CFG_PKG_WRITE;
  
  		res = tb_cfg_request_sync(ctl, req, timeout_msec);
  
  		tb_cfg_request_put(req);
  
  		if (res.err != -ETIMEDOUT)
  			break;
  
  		/* Wait a bit (arbitrary time) until we send a retry */
  		usleep_range(10, 100);
  	}
  	if (res.err)
  		return res;
  
  	res.response_port = reply.addr.port;
  	res.err = check_config_address(reply.addr, space, offset, length);
  	return res;
  }
  static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
  			    const struct tb_cfg_result *res)
  {
  	/*
  	 * For unimplemented ports access to port config space may return
  	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
  	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
  	 * that the caller can mark the port as disabled.
  	 */
  	if (space == TB_CFG_PORT &&
  	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
  		return -ENODEV;
  
  	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
  	return -EIO;
  }
  int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
  		enum tb_cfg_space space, u32 offset, u32 length)
  {
  	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
  			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
  	switch (res.err) {
  	case 0:
  		/* Success */
  		break;
  
  	case 1:
  		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

  	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
  		break;
  
  	default:
  		WARN(1, "tb_cfg_read: %d
  ", res.err);
  		break;
	}
  	return res.err;
  }
  int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
  		 enum tb_cfg_space space, u32 offset, u32 length)
  {
  	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
  			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
  	switch (res.err) {
  	case 0:
  		/* Success */
  		break;
  
  	case 1:
  		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);
  
  	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
  		break;
  
  	default:
  		WARN(1, "tb_cfg_write: %d
  ", res.err);
  		break;
  	}
  	return res.err;
  }
  
  /**
   * tb_cfg_get_upstream_port() - get upstream port number of switch at route
   *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
   * returns the port number from which the reply originated.
   *
   * Return: Returns the upstream port number on success or an error code on
   * failure.
   */
  int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
  {
  	u32 dummy;
  	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
  						   TB_CFG_SWITCH, 0, 1,
  						   TB_CFG_DEFAULT_TIMEOUT);
  	if (res.err == 1)
  		return -EIO;
  	if (res.err)
  		return res.err;
  	return res.response_port;
  }