Blame view

drivers/gpu/drm/drm_dp_mst_topology.c 159 KB
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
  /*
   * Copyright © 2014 Red Hat
   *
   * Permission to use, copy, modify, distribute, and sell this software and its
   * documentation for any purpose is hereby granted without fee, provided that
   * the above copyright notice appear in all copies and that both that copyright
   * notice and this permission notice appear in supporting documentation, and
   * that the name of the copyright holders not be used in advertising or
   * publicity pertaining to distribution of the software without specific,
   * written prior permission.  The copyright holders make no representations
   * about the suitability of this software for any purpose.  It is provided "as
   * is" without express or implied warranty.
   *
   * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
   * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
   * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
   * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
   * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
   * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
   * OF THIS SOFTWARE.
   */
e38c298fc   Sean Paul   drm/mst: Add supp...
22
  #include <linux/bitfield.h>
ad7f8a1f9   Dave Airlie   drm/helper: add D...
23
  #include <linux/delay.h>
ad7f8a1f9   Dave Airlie   drm/helper: add D...
24
  #include <linux/errno.h>
580fc13f3   Jani Nikula   drm/dp: drmP.h in...
25
26
27
  #include <linux/i2c.h>
  #include <linux/init.h>
  #include <linux/kernel.h>
e38c298fc   Sean Paul   drm/mst: Add supp...
28
  #include <linux/random.h>
ad7f8a1f9   Dave Airlie   drm/helper: add D...
29
  #include <linux/sched.h>
75bc08ab2   Dave Airlie   drm/mst: fix buil...
30
  #include <linux/seq_file.h>
873a95e0d   Lyude Paul   drm/dp_mst: Incre...
31
  #include <linux/iopoll.h>
ad7f8a1f9   Dave Airlie   drm/helper: add D...
32

12a280c72   Lyude Paul   drm/dp_mst: Add t...
33
  #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
68acde762   Chenwandun   drm/dp_mst: fix g...
34
  #include <linux/stacktrace.h>
12a280c72   Lyude Paul   drm/dp_mst: Add t...
35
36
37
38
  #include <linux/sort.h>
  #include <linux/timekeeping.h>
  #include <linux/math64.h>
  #endif
a4370c777   Ville Syrjälä   drm/atomic: Make ...
39
40
  #include <drm/drm_atomic.h>
  #include <drm/drm_atomic_helper.h>
580fc13f3   Jani Nikula   drm/dp: drmP.h in...
41
42
  #include <drm/drm_dp_mst_helper.h>
  #include <drm/drm_drv.h>
580fc13f3   Jani Nikula   drm/dp: drmP.h in...
43
  #include <drm/drm_print.h>
fcd70cd36   Daniel Vetter   drm: Split out dr...
44
  #include <drm/drm_probe_helper.h>
ad7f8a1f9   Dave Airlie   drm/helper: add D...
45

562836a26   Ville Syrjälä   drm/dp_mst: Enabl...
46
  #include "drm_crtc_helper_internal.h"
2f015ec6e   Lyude Paul   drm/dp_mst: Add s...
47
  #include "drm_dp_mst_topology_internal.h"
562836a26   Ville Syrjälä   drm/dp_mst: Enabl...
48

ad7f8a1f9   Dave Airlie   drm/helper: add D...
49
50
51
52
53
54
55
  /**
   * DOC: dp mst helper
   *
   * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
   * protocol. The helpers contain a topology manager and bandwidth manager.
   * The helpers encapsulate the sending and receiving of sideband msgs.
   */
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
56
57
58
59
60
  /*
   * An UP (sink-initiated) sideband request that has been received and is
   * queued for later processing, outside the receive path.
   */
  struct drm_dp_pending_up_req {
  	/* sideband header the request arrived with */
  	struct drm_dp_sideband_msg_hdr hdr;
  	/* parsed request body */
  	struct drm_dp_sideband_msg_req_body msg;
  	/* entry in the manager's list of pending up-requests */
  	struct list_head next;
  };
ad7f8a1f9   Dave Airlie   drm/helper: add D...
61
62
  static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
  				  char *buf);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
63

d0757afd0   Lyude Paul   drm/dp_mst: Renam...
64
  static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
65
66
67
68
  
  static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
  				     int id,
  				     struct drm_dp_payload *payload);
562836a26   Ville Syrjälä   drm/dp_mst: Enabl...
69
70
71
  static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
  				 struct drm_dp_mst_port *port,
  				 int offset, int size, u8 *bytes);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
72
73
74
  static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
  				  struct drm_dp_mst_port *port,
  				  int offset, int size, u8 *bytes);
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
75
76
  static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
  				    struct drm_dp_mst_branch *mstb);
f79489074   Sean Paul   drm/dp_mst: Clear...
77
78
79
80
  
  static void
  drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
  				   struct drm_dp_mst_branch *mstb);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
81
82
83
84
85
  static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
  					   struct drm_dp_mst_branch *mstb,
  					   struct drm_dp_mst_port *port);
  static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
  				 u8 *guid);
d8bd15b37   Imre Deak   drm/dp_mst: Fix t...
86
87
  static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
  static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
88
  static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
3dadbd295   Ville Syrjälä   drm/dp/mst: Provi...
89

2f015ec6e   Lyude Paul   drm/dp_mst: Add s...
90
  #define DBG_PREFIX "[dp_mst]"
3dadbd295   Ville Syrjälä   drm/dp/mst: Provi...
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
  #define DP_STR(x) [DP_ ## x] = #x

  /*
   * Return a human-readable name for a sideband request type, or
   * "unknown" for values outside the table.
   */
  static const char *drm_dp_mst_req_type_str(u8 req_type)
  {
  	static const char * const names[] = {
  		DP_STR(GET_MSG_TRANSACTION_VERSION),
  		DP_STR(LINK_ADDRESS),
  		DP_STR(CONNECTION_STATUS_NOTIFY),
  		DP_STR(ENUM_PATH_RESOURCES),
  		DP_STR(ALLOCATE_PAYLOAD),
  		DP_STR(QUERY_PAYLOAD),
  		DP_STR(RESOURCE_STATUS_NOTIFY),
  		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
  		DP_STR(REMOTE_DPCD_READ),
  		DP_STR(REMOTE_DPCD_WRITE),
  		DP_STR(REMOTE_I2C_READ),
  		DP_STR(REMOTE_I2C_WRITE),
  		DP_STR(POWER_UP_PHY),
  		DP_STR(POWER_DOWN_PHY),
  		DP_STR(SINK_EVENT_NOTIFY),
  		DP_STR(QUERY_STREAM_ENC_STATUS),
  	};
  	const char *name = NULL;

  	/* Holes in the designated-initializer table are NULL. */
  	if (req_type < ARRAY_SIZE(names))
  		name = names[req_type];

  	return name ? name : "unknown";
  }

  #undef DP_STR
  #define DP_STR(x) [DP_NAK_ ## x] = #x

  /*
   * Return a human-readable name for a sideband NAK reason, or "unknown"
   * for values outside the table.
   */
  static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
  {
  	static const char * const names[] = {
  		DP_STR(WRITE_FAILURE),
  		DP_STR(INVALID_READ),
  		DP_STR(CRC_FAILURE),
  		DP_STR(BAD_PARAM),
  		DP_STR(DEFER),
  		DP_STR(LINK_FAILURE),
  		DP_STR(NO_RESOURCES),
  		DP_STR(DPCD_FAIL),
  		DP_STR(I2C_NAK),
  		DP_STR(ALLOCATE_FAIL),
  	};
  	const char *name = NULL;

  	/* Holes in the designated-initializer table are NULL. */
  	if (nak_reason < ARRAY_SIZE(names))
  		name = names[nak_reason];

  	return name ? name : "unknown";
  }

  #undef DP_STR
2f015ec6e   Lyude Paul   drm/dp_mst: Add s...
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
  #define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

  /*
   * Return a human-readable name for a sideband tx-message state, or
   * "unknown" for values outside the table.
   */
  static const char *drm_dp_mst_sideband_tx_state_str(int state)
  {
  	static const char * const names[] = {
  		DP_STR(QUEUED),
  		DP_STR(START_SEND),
  		DP_STR(SENT),
  		DP_STR(RX),
  		DP_STR(TIMEOUT),
  	};
  	const char *name = NULL;

  	/* Explicit sign check instead of relying on unsigned promotion. */
  	if (state >= 0 && (size_t)state < ARRAY_SIZE(names))
  		name = names[state];

  	return name ? name : "unknown";
  }
  
  /*
   * Format a branch device's Relative Address (RAD) as a hex nibble
   * string for debug output.
   *
   * Each hop is a 4-bit port number, two hops packed per rad[] byte.
   * Returns the snprintf() result (characters that would have been
   * written).
   */
  static int
  drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
  {
  	int i;
  	u8 unpacked_rad[16];

  	for (i = 0; i < lct; i++) {
  		if (i % 2)
  			unpacked_rad[i] = rad[i / 2] >> 4;
  		else
  			/*
  			 * Was "& BIT_MASK(4)", i.e. & 0x10 — a single bit,
  			 * not a 4-bit mask — which could only ever yield
  			 * 0x00 or 0x10 for even hops. Use a real nibble
  			 * mask.
  			 *
  			 * NOTE(review): even hops are taken from the low
  			 * nibble here; confirm this matches the nibble
  			 * order used when the RAD is built/walked.
  			 */
  			unpacked_rad[i] = rad[i / 2] & 0xF;
  	}

  	/* TODO: Eventually add something to printk so we can format the rad
  	 * like this: 1.2.3
  	 */
  	return snprintf(out, len, "%*phC", lct, unpacked_rad);
  }
3dadbd295   Ville Syrjälä   drm/dp/mst: Provi...
184

ad7f8a1f9   Dave Airlie   drm/helper: add D...
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
  /* sideband msg handling */
  /*
   * Bit-serial CRC-4 over the first @num_nibbles nibbles of @data, using
   * the sideband header polynomial 0x13 (x^4 + x + 1).
   */
  static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
  {
  	size_t total_bits = num_nibbles * 4;
  	size_t bit;
  	u8 remainder = 0;

  	/* Shift every message bit in, MSB first. */
  	for (bit = 0; bit < total_bits; bit++) {
  		u8 in = (data[bit / 8] >> (7 - (bit % 8))) & 0x1;

  		remainder = (remainder << 1) | in;
  		if (remainder & 0x10)
  			remainder ^= 0x13;
  	}

  	/* Flush four zero bits to push out the final remainder. */
  	for (bit = 0; bit < 4; bit++) {
  		remainder <<= 1;
  		if (remainder & 0x10)
  			remainder ^= 0x13;
  	}

  	return remainder;
  }
  
  /*
   * Bit-serial 8-bit CRC over @number_of_bytes bytes of @data, using the
   * sideband data polynomial 0xD5. Only the low byte of the shift
   * register is meaningful and returned.
   */
  static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
  {
  	size_t total_bits = number_of_bytes * 8;
  	size_t bit;
  	u16 remainder = 0;

  	/* Shift every message bit in, MSB first. */
  	for (bit = 0; bit < total_bits; bit++) {
  		u8 in = (data[bit / 8] >> (7 - (bit % 8))) & 0x1;

  		remainder = (remainder << 1) | in;
  		if (remainder & 0x100)
  			remainder ^= 0xd5;
  	}

  	/* Flush eight zero bits to push out the final remainder. */
  	for (bit = 0; bit < 8; bit++) {
  		remainder <<= 1;
  		if (remainder & 0x100)
  			remainder ^= 0xd5;
  	}

  	return remainder & 0xff;
  }
  static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
  {
  	u8 size = 3;
948de8423   Suraj Upadhyay   drm : Insert blan...
256

ad7f8a1f9   Dave Airlie   drm/helper: add D...
257
258
259
260
261
262
263
264
265
266
  	size += (hdr->lct / 2);
  	return size;
  }
  
  /*
   * Serialize a sideband message header into @buf; the number of bytes
   * written (the last of which carries the CRC-4 in its low nibble) is
   * returned through @len.
   */
  static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
  					   u8 *buf, int *len)
  {
  	int pos = 0;
  	int i;

  	/* Byte 0: link count total (high nibble), link count remaining (low). */
  	buf[pos++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);

  	/* Relative address bytes, one per two hops. */
  	for (i = 0; i < hdr->lct / 2; i++)
  		buf[pos++] = hdr->rad[i];

  	buf[pos++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
  		     (hdr->msg_len & 0x3f);
  	buf[pos++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

  	/* CRC-4 over every nibble written so far except the CRC slot itself. */
  	buf[pos - 1] |= drm_dp_msg_header_crc4(buf, pos * 2 - 1) & 0xf;

  	*len = pos;
  }
  
  /*
   * Parse a sideband message header out of @buf (at most @buflen bytes).
   * On success fills in @hdr, stores the header length in @hdrlen and
   * returns true; returns false on a truncated buffer or CRC mismatch.
   */
  static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
  					   u8 *buf, int buflen, u8 *hdrlen)
  {
  	u8 crc4;
  	u8 len;
  	int i;
  	u8 idx;

  	/* lct == 0 in the first byte can never be a valid header. */
  	if (buf[0] == 0)
  		return false;
  	/* Header is 3 fixed bytes plus one RAD byte per two hops. */
  	len = 3;
  	len += ((buf[0] & 0xf0) >> 4) / 2;
  	if (len > buflen)
  		return false;
  	/* CRC-4 covers every header nibble except the CRC nibble itself. */
  	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

  	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
  		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x
  ", crc4, buf[len - 1]);
  		return false;
  	}

  	hdr->lct = (buf[0] & 0xf0) >> 4;
  	hdr->lcr = (buf[0] & 0xf);
  	idx = 1;
  	/* Relative address bytes follow byte 0. */
  	for (i = 0; i < (hdr->lct / 2); i++)
  		hdr->rad[i] = buf[idx++];
  	hdr->broadcast = (buf[idx] >> 7) & 0x1;
  	hdr->path_msg = (buf[idx] >> 6) & 0x1;
  	hdr->msg_len = buf[idx] & 0x3f;
  	idx++;
  	hdr->somt = (buf[idx] >> 7) & 0x1;
  	hdr->eomt = (buf[idx] >> 6) & 0x1;
  	hdr->seqno = (buf[idx] >> 4) & 0x1;
  	idx++;
  	*hdrlen = idx;
  	return true;
  }
2f015ec6e   Lyude Paul   drm/dp_mst: Add s...
319
320
321
  /*
   * Serialize a sideband request body into raw->msg according to the DP
   * MST sideband message encodings; raw->cur_len is set to the number of
   * bytes produced. Request types with no extra payload (e.g.
   * DP_LINK_ADDRESS) encode only the request-type byte.
   */
  void
  drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
  			   struct drm_dp_sideband_msg_tx *raw)
  {
  	int idx = 0;
  	int i;
  	u8 *buf = raw->msg;

  	buf[idx++] = req->req_type & 0x7f;

  	switch (req->req_type) {
  	case DP_ENUM_PATH_RESOURCES:
  	case DP_POWER_DOWN_PHY:
  	case DP_POWER_UP_PHY:
  		/* Single byte: port number in the high nibble. */
  		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
  		idx++;
  		break;
  	case DP_ALLOCATE_PAYLOAD:
  		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
  			(req->u.allocate_payload.number_sdp_streams & 0xf);
  		idx++;
  		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
  		idx++;
  		/* PBN is big-endian over two bytes. */
  		buf[idx] = (req->u.allocate_payload.pbn >> 8);
  		idx++;
  		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
  		idx++;
  		/* SDP stream sinks are packed two 4-bit values per byte. */
  		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
  			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
  				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
  			idx++;
  		}
  		/* Odd count: the last sink occupies a high nibble alone. */
  		if (req->u.allocate_payload.number_sdp_streams & 1) {
  			i = req->u.allocate_payload.number_sdp_streams - 1;
  			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
  			idx++;
  		}
  		break;
  	case DP_QUERY_PAYLOAD:
  		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
  		idx++;
  		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
  		idx++;
  		break;
  	case DP_REMOTE_DPCD_READ:
  		/* Port nibble shares its byte with DPCD address bits 19:16. */
  		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
  		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
  		idx++;
  		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
  		idx++;
  		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
  		idx++;
  		buf[idx] = (req->u.dpcd_read.num_bytes);
  		idx++;
  		break;

  	case DP_REMOTE_DPCD_WRITE:
  		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
  		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
  		idx++;
  		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
  		idx++;
  		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
  		idx++;
  		buf[idx] = (req->u.dpcd_write.num_bytes);
  		idx++;
  		/* Write payload follows the fixed fields inline. */
  		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
  		idx += req->u.dpcd_write.num_bytes;
  		break;
  	case DP_REMOTE_I2C_READ:
  		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
  		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
  		idx++;
  		/* Each write-before-read transaction: dev id, length, data, flags. */
  		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
  			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
  			idx++;
  			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
  			idx++;
  			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
  			idx += req->u.i2c_read.transactions[i].num_bytes;
  			/* no_stop_bit at bit 4, transaction delay in bits 3:0. */
  			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
  			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
  			idx++;
  		}
  		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
  		idx++;
  		buf[idx] = (req->u.i2c_read.num_bytes_read);
  		idx++;
  		break;

  	case DP_REMOTE_I2C_WRITE:
  		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
  		idx++;
  		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
  		idx++;
  		buf[idx] = (req->u.i2c_write.num_bytes);
  		idx++;
  		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
  		idx += req->u.i2c_write.num_bytes;
  		break;
  	case DP_QUERY_STREAM_ENC_STATUS: {
  		const struct drm_dp_query_stream_enc_status *msg;

  		msg = &req->u.enc_status;
  		buf[idx] = msg->stream_id;
  		idx++;
  		memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
  		idx += sizeof(msg->client_id);
  		/* Pack event/behavior fields and their valid bits into one byte. */
  		buf[idx] = 0;
  		buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
  		buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
  		buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
  		buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
  		idx++;
  		}
  		break;
  	}
  	raw->cur_len = idx;
  }
2f015ec6e   Lyude Paul   drm/dp_mst: Add s...
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
  EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
  
  /* Decode a sideband request we've encoded, mainly used for debugging */
  /*
   * Inverse of drm_dp_encode_sideband_req(). Variable-length payloads
   * (DPCD write data, I2C transaction data) are kmemdup()ed; on success
   * the caller owns and must kfree() them.
   *
   * Returns 0 on success, -ENOMEM if duplicating a payload fails (any
   * partially-duplicated I2C transaction buffers are freed first).
   */
  int
  drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
  			   struct drm_dp_sideband_msg_req_body *req)
  {
  	const u8 *buf = raw->msg;
  	int i, idx = 0;

  	req->req_type = buf[idx++] & 0x7f;
  	switch (req->req_type) {
  	case DP_ENUM_PATH_RESOURCES:
  	case DP_POWER_DOWN_PHY:
  	case DP_POWER_UP_PHY:
  		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
  		break;
  	case DP_ALLOCATE_PAYLOAD:
  		{
  			struct drm_dp_allocate_payload *a =
  				&req->u.allocate_payload;

  			a->number_sdp_streams = buf[idx] & 0xf;
  			a->port_number = (buf[idx] >> 4) & 0xf;

  			/* VCPI byte must not have its top bit set. */
  			WARN_ON(buf[++idx] & 0x80);
  			a->vcpi = buf[idx] & 0x7f;

  			a->pbn = buf[++idx] << 8;
  			a->pbn |= buf[++idx];

  			idx++;
  			/* Sinks are packed two nibbles per byte, high nibble first. */
  			for (i = 0; i < a->number_sdp_streams; i++) {
  				a->sdp_stream_sink[i] =
  					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
  			}
  		}
  		break;
  	case DP_QUERY_PAYLOAD:
  		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
  		WARN_ON(buf[++idx] & 0x80);
  		req->u.query_payload.vcpi = buf[idx] & 0x7f;
  		break;
  	case DP_REMOTE_DPCD_READ:
  		{
  			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

  			r->port_number = (buf[idx] >> 4) & 0xf;

  			/* 20-bit DPCD address: bits 19:16 share byte 0 with the port. */
  			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
  			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
  			r->dpcd_address |= buf[++idx] & 0xff;

  			r->num_bytes = buf[++idx];
  		}
  		break;
  	case DP_REMOTE_DPCD_WRITE:
  		{
  			struct drm_dp_remote_dpcd_write *w =
  				&req->u.dpcd_write;

  			w->port_number = (buf[idx] >> 4) & 0xf;

  			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
  			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
  			w->dpcd_address |= buf[++idx] & 0xff;

  			w->num_bytes = buf[++idx];

  			/* Caller owns w->bytes on success. */
  			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
  					   GFP_KERNEL);
  			if (!w->bytes)
  				return -ENOMEM;
  		}
  		break;
  	case DP_REMOTE_I2C_READ:
  		{
  			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
  			struct drm_dp_remote_i2c_read_tx *tx;
  			bool failed = false;

  			r->num_transactions = buf[idx] & 0x3;
  			r->port_number = (buf[idx] >> 4) & 0xf;
  			for (i = 0; i < r->num_transactions; i++) {
  				tx = &r->transactions[i];

  				tx->i2c_dev_id = buf[++idx] & 0x7f;
  				tx->num_bytes = buf[++idx];
  				tx->bytes = kmemdup(&buf[++idx],
  						    tx->num_bytes,
  						    GFP_KERNEL);
  				if (!tx->bytes) {
  					failed = true;
  					break;
  				}
  				idx += tx->num_bytes;
  				/*
  				 * The encoder writes no_stop_bit at bit 4
  				 * ("... & 0x1) << 4"); reading it back with
  				 * ">> 5" dropped the bit on round-trip, so
  				 * use bit 4 here to match.
  				 */
  				tx->no_stop_bit = (buf[idx] >> 4) & 0x1;
  				tx->i2c_transaction_delay = buf[idx] & 0xf;
  			}

  			if (failed) {
  				/* Free everything duplicated before the failure. */
  				for (i = 0; i < r->num_transactions; i++) {
  					tx = &r->transactions[i];
  					kfree(tx->bytes);
  				}
  				return -ENOMEM;
  			}

  			r->read_i2c_device_id = buf[++idx] & 0x7f;
  			r->num_bytes_read = buf[++idx];
  		}
  		break;
  	case DP_REMOTE_I2C_WRITE:
  		{
  			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

  			w->port_number = (buf[idx] >> 4) & 0xf;
  			w->write_i2c_device_id = buf[++idx] & 0x7f;
  			w->num_bytes = buf[++idx];
  			/* Caller owns w->bytes on success. */
  			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
  					   GFP_KERNEL);
  			if (!w->bytes)
  				return -ENOMEM;
  		}
  		break;
  	case DP_QUERY_STREAM_ENC_STATUS:
  		req->u.enc_status.stream_id = buf[idx++];
  		for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
  			req->u.enc_status.client_id[i] = buf[idx++];

  		req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
  							   buf[idx]);
  		req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
  								 buf[idx]);
  		req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
  							      buf[idx]);
  		req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
  								    buf[idx]);
  		break;
  	}

  	return 0;
  }
  EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
  
  /*
   * Pretty-print a decoded sideband request body to @printer at the given
   * indent level, for debugfs/debug logging.
   */
  void
  drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
  				  int indent, struct drm_printer *printer)
  {
  	int i;

  #define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
  	if (req->req_type == DP_LINK_ADDRESS) {
  		/* No contents to print */
  		P("type=%s
  ", drm_dp_mst_req_type_str(req->req_type));
  		return;
  	}

  	P("type=%s contents:
  ", drm_dp_mst_req_type_str(req->req_type));
  	indent++;

  	switch (req->req_type) {
  	case DP_ENUM_PATH_RESOURCES:
  	case DP_POWER_DOWN_PHY:
  	case DP_POWER_UP_PHY:
  		P("port=%d
  ", req->u.port_num.port_number);
  		break;
  	case DP_ALLOCATE_PAYLOAD:
  		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph
  ",
  		  req->u.allocate_payload.port_number,
  		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
  		  req->u.allocate_payload.number_sdp_streams,
  		  req->u.allocate_payload.number_sdp_streams,
  		  req->u.allocate_payload.sdp_stream_sink);
  		break;
  	case DP_QUERY_PAYLOAD:
  		P("port=%d vcpi=%d
  ",
  		  req->u.query_payload.port_number,
  		  req->u.query_payload.vcpi);
  		break;
  	case DP_REMOTE_DPCD_READ:
  		P("port=%d dpcd_addr=%05x len=%d
  ",
  		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
  		  req->u.dpcd_read.num_bytes);
  		break;
  	case DP_REMOTE_DPCD_WRITE:
  		P("port=%d addr=%05x len=%d: %*ph
  ",
  		  req->u.dpcd_write.port_number,
  		  req->u.dpcd_write.dpcd_address,
  		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
  		  req->u.dpcd_write.bytes);
  		break;
  	case DP_REMOTE_I2C_READ:
  		P("port=%d num_tx=%d id=%d size=%d:
  ",
  		  req->u.i2c_read.port_number,
  		  req->u.i2c_read.num_transactions,
  		  req->u.i2c_read.read_i2c_device_id,
  		  req->u.i2c_read.num_bytes_read);
  
  		/* One extra indent level for the per-transaction lines. */
  		indent++;
  		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
  			const struct drm_dp_remote_i2c_read_tx *rtx =
  				&req->u.i2c_read.transactions[i];
  
  			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph
  ",
  			  i, rtx->i2c_dev_id, rtx->num_bytes,
  			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
  			  rtx->num_bytes, rtx->bytes);
  		}
  		break;
  	case DP_REMOTE_I2C_WRITE:
  		P("port=%d id=%d size=%d: %*ph
  ",
  		  req->u.i2c_write.port_number,
  		  req->u.i2c_write.write_i2c_device_id,
  		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
  		  req->u.i2c_write.bytes);
  		break;
  	case DP_QUERY_STREAM_ENC_STATUS:
  		P("stream_id=%u client_id=%*ph stream_event=%x "
  		  "valid_event=%d stream_behavior=%x valid_behavior=%d",
  		  req->u.enc_status.stream_id,
  		  (int)ARRAY_SIZE(req->u.enc_status.client_id),
  		  req->u.enc_status.client_id, req->u.enc_status.stream_event,
  		  req->u.enc_status.valid_stream_event,
  		  req->u.enc_status.stream_behavior,
  		  req->u.enc_status.valid_stream_behavior);
  		break;
  	default:
  		P("???
  ");
  		break;
  	}
  #undef P
  }
  EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
  
  /*
   * Dump a tx message (offsets, state, destination RAD and the decoded
   * request body) to @p for debugging.
   */
  static inline void
  drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
  				const struct drm_dp_sideband_msg_tx *txmsg)
  {
  	struct drm_dp_sideband_msg_req_body req;
  	char buf[64];
  	int ret;
  	int i;
  
  	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
  			      sizeof(buf));
  	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s
  ",
  		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
  		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
  		   txmsg->path_msg, buf);
  
  	ret = drm_dp_decode_sideband_req(txmsg, &req);
  	if (ret) {
  		drm_printf(p, "<failed to decode sideband req: %d>
  ", ret);
  		return;
  	}
  	drm_dp_dump_sideband_msg_req_body(&req, 1, p);
  
  	/* Decoding kmemdup()ed any payload buffers; free them here. */
  	switch (req.req_type) {
  	case DP_REMOTE_DPCD_WRITE:
  		kfree(req.u.dpcd_write.bytes);
  		break;
  	case DP_REMOTE_I2C_READ:
  		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
  			kfree(req.u.i2c_read.transactions[i].bytes);
  		break;
  	case DP_REMOTE_I2C_WRITE:
  		kfree(req.u.i2c_write.bytes);
  		break;
  	}
  }
ad7f8a1f9   Dave Airlie   drm/helper: add D...
722
723
724
725
  
  /*
   * Append the data CRC byte to a @len-byte sideband chunk; @msg must
   * have room for one extra byte.
   */
  static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
  {
  	msg[len] = drm_dp_msg_data_crc4(msg, len);
  }
  
  /*
   * Serialize a sideband reply header: one byte carrying the reply type
   * (ACK/NAK) in the top bit and the request type in the low seven.
   */
  static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
  					 struct drm_dp_sideband_msg_tx *raw)
  {
  	raw->msg[0] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
  	raw->cur_len = 1;
  }
21a729d00   Sean Paul   drm/mst: Separate...
741
742
743
  /*
   * Record a newly received sideband header in the rx message builder.
   *
   * Returns true when the chunk belonging to this header should be
   * appended, false when it must be ignored (a continuation with no
   * start-of-transaction seen, or a duplicate start).
   *
   * NOTE(review): declared int but only ever returns true/false —
   * effectively a bool; verify no caller depends on other values.
   */
  static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
  					  struct drm_dp_sideband_msg_hdr *hdr,
  					  u8 hdrlen)
  {
  	/*
  	 * ignore out-of-order messages or messages that are part of a
  	 * failed transaction
  	 */
  	if (!hdr->somt && !msg->have_somt)
  		return false;
  
  	/* get length contained in this portion */
  	msg->curchunk_idx = 0;
  	msg->curchunk_len = hdr->msg_len;
  	msg->curchunk_hdrlen = hdrlen;
  
  	/* we have already gotten an somt - don't bother parsing */
  	if (hdr->somt && msg->have_somt)
  		return false;
  
  	if (hdr->somt) {
  		/* First chunk of a transaction: remember its header. */
  		memcpy(&msg->initial_hdr, hdr,
  		       sizeof(struct drm_dp_sideband_msg_hdr));
  		msg->have_somt = true;
  	}
  	if (hdr->eomt)
  		msg->have_eomt = true;
  
  	return true;
  }
ad7f8a1f9   Dave Airlie   drm/helper: add D...
771

21a729d00   Sean Paul   drm/mst: Separate...
772
773
774
775
776
  /* this adds a chunk of msg to the builder to get the final msg */
  /*
   * Once a full chunk has accumulated, its CRC is verified (a mismatch is
   * only logged, not rejected) and the chunk — minus the trailing CRC
   * byte — is appended to the assembled message. Always returns true.
   *
   * NOTE(review): replybuflen is not validated against the size of
   * msg->chunk here — presumably bounded by the caller; verify.
   */
  static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
  					   u8 *replybuf, u8 replybuflen)
  {
  	u8 crc4;
  
  	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
  	msg->curchunk_idx += replybuflen;
  
  	if (msg->curchunk_idx >= msg->curchunk_len) {
  		/* do CRC */
  		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
  		if (crc4 != msg->chunk[msg->curchunk_len - 1])
  			print_hex_dump(KERN_DEBUG, "wrong crc",
  				       DUMP_PREFIX_NONE, 16, 1,
  				       msg->chunk,  msg->curchunk_len, false);
  		/* copy chunk into bigger msg */
  		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
  		msg->curlen += msg->curchunk_len - 1;
  	}
  	return true;
  }
  
  /*
   * Parse a LINK_ADDRESS reply (branch guid plus one descriptor per
   * port) out of the assembled rx message into @repmsg. Returns false
   * if the reply is shorter than the fields it claims to contain.
   */
  static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
  					       struct drm_dp_sideband_msg_reply_body *repmsg)
  {
  	int idx = 1;
  	int i;
  
  	/*
  	 * NOTE(review): no curlen check before this 16-byte guid copy —
  	 * presumably callers guarantee a minimum reply size; verify.
  	 */
  	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
  	idx += 16;
  	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
  	idx++;
  	if (idx > raw->curlen)
  		goto fail_len;
  	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
  		if (raw->msg[idx] & 0x80)
  			repmsg->u.link_addr.ports[i].input_port = 1;
  
  		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
  		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
  
  		idx++;
  		if (idx > raw->curlen)
  			goto fail_len;
  		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
  		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
  		if (repmsg->u.link_addr.ports[i].input_port == 0)
  			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
  		idx++;
  		if (idx > raw->curlen)
  			goto fail_len;
  		/* Output ports carry extra fields: DPCD rev, peer guid, SDP counts. */
  		if (repmsg->u.link_addr.ports[i].input_port == 0) {
  			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
  			idx++;
  			if (idx > raw->curlen)
  				goto fail_len;
  			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
  			idx += 16;
  			if (idx > raw->curlen)
  				goto fail_len;
  			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
  			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
  			idx++;
  
  		}
  		if (idx > raw->curlen)
  			goto fail_len;
  	}
  
  	return true;
  fail_len:
  	DRM_DEBUG_KMS("link address reply parse length fail %d %d
  ", idx, raw->curlen);
  	return false;
  }
  
  static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
  						   struct drm_dp_sideband_msg_reply_body *repmsg)
  {
  	int idx = 1;
948de8423   Suraj Upadhyay   drm : Insert blan...
853

ad7f8a1f9   Dave Airlie   drm/helper: add D...
854
855
856
857
858
  	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
  	idx++;
  	if (idx > raw->curlen)
  		goto fail_len;
  	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
a4c30a486   Hans Verkuil   drm_dp_mst_topolo...
859
  	idx++;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
  	if (idx > raw->curlen)
  		goto fail_len;
  
  	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
  	return true;
  fail_len:
  	DRM_DEBUG_KMS("link address reply parse length fail %d %d
  ", idx, raw->curlen);
  	return false;
  }
  
  static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
  						      struct drm_dp_sideband_msg_reply_body *repmsg)
  {
  	int idx = 1;
948de8423   Suraj Upadhyay   drm : Insert blan...
875

ad7f8a1f9   Dave Airlie   drm/helper: add D...
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
  	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
  	idx++;
  	if (idx > raw->curlen)
  		goto fail_len;
  	return true;
  fail_len:
  	DRM_DEBUG_KMS("parse length fail %d %d
  ", idx, raw->curlen);
  	return false;
  }
  
  static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
  						      struct drm_dp_sideband_msg_reply_body *repmsg)
  {
  	int idx = 1;
  
  	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
  	idx++;
  	if (idx > raw->curlen)
  		goto fail_len;
  	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
  	idx++;
  	/* TODO check */
  	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
  	return true;
  fail_len:
  	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d
  ", idx, raw->curlen);
  	return false;
  }
  
  static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
  							  struct drm_dp_sideband_msg_reply_body *repmsg)
  {
  	int idx = 1;
948de8423   Suraj Upadhyay   drm : Insert blan...
911

ad7f8a1f9   Dave Airlie   drm/helper: add D...
912
  	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
a3c2b0ffc   David Francis   drm/dp_mst: Parse...
913
  	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
  	idx++;
  	if (idx > raw->curlen)
  		goto fail_len;
  	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
  	idx += 2;
  	if (idx > raw->curlen)
  		goto fail_len;
  	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
  	idx += 2;
  	if (idx > raw->curlen)
  		goto fail_len;
  	return true;
  fail_len:
  	DRM_DEBUG_KMS("enum resource parse length fail %d %d
  ", idx, raw->curlen);
  	return false;
  }
  
  static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
  							  struct drm_dp_sideband_msg_reply_body *repmsg)
  {
  	int idx = 1;
948de8423   Suraj Upadhyay   drm : Insert blan...
936

ad7f8a1f9   Dave Airlie   drm/helper: add D...
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
  	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
  	idx++;
  	if (idx > raw->curlen)
  		goto fail_len;
  	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
  	idx++;
  	if (idx > raw->curlen)
  		goto fail_len;
  	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
  	idx += 2;
  	if (idx > raw->curlen)
  		goto fail_len;
  	return true;
  fail_len:
  	DRM_DEBUG_KMS("allocate payload parse length fail %d %d
  ", idx, raw->curlen);
  	return false;
  }
  
  static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
  						    struct drm_dp_sideband_msg_reply_body *repmsg)
  {
  	int idx = 1;
948de8423   Suraj Upadhyay   drm : Insert blan...
960

ad7f8a1f9   Dave Airlie   drm/helper: add D...
961
962
963
964
965
966
967
968
969
970
971
972
973
974
  	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
  	idx++;
  	if (idx > raw->curlen)
  		goto fail_len;
  	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
  	idx += 2;
  	if (idx > raw->curlen)
  		goto fail_len;
  	return true;
  fail_len:
  	DRM_DEBUG_KMS("query payload parse length fail %d %d
  ", idx, raw->curlen);
  	return false;
  }
0bb9c2b27   Dhinakaran Pandiyan   drm/dp/mst: Sideb...
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
  static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
  						       struct drm_dp_sideband_msg_reply_body *repmsg)
  {
  	int idx = 1;
  
  	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
  	idx++;
  	if (idx > raw->curlen) {
  		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d
  ",
  			      idx, raw->curlen);
  		return false;
  	}
  	return true;
  }
e38c298fc   Sean Paul   drm/mst: Add supp...
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
  static bool
  drm_dp_sideband_parse_query_stream_enc_status(
  				struct drm_dp_sideband_msg_rx *raw,
  				struct drm_dp_sideband_msg_reply_body *repmsg)
  {
  	struct drm_dp_query_stream_enc_status_ack_reply *reply;
  
  	reply = &repmsg->u.enc_status;
  
  	reply->stream_id = raw->msg[3];
  
  	reply->reply_signed = raw->msg[2] & BIT(0);
  
  	/*
  	 * NOTE: It's my impression from reading the spec that the below parsing
  	 * is correct. However I noticed while testing with an HDCP 1.4 display
  	 * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
  	 * would expect both bits to be set. So keep the parsing following the
  	 * spec, but beware reality might not match the spec (at least for some
  	 * configurations).
  	 */
  	reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
  	reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);
  
  	reply->query_capable_device_present = raw->msg[2] & BIT(5);
  	reply->legacy_device_present = raw->msg[2] & BIT(6);
  	reply->unauthorizable_device_present = raw->msg[2] & BIT(7);
  
  	reply->auth_completed = !!(raw->msg[1] & BIT(3));
  	reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
  	reply->repeater_present = !!(raw->msg[1] & BIT(5));
  	reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;
  
  	return true;
  }
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1025
1026
1027
1028
1029
1030
/* Decode a sideband reply header and dispatch to the per-request parser.
 * Returns false for NAKs (after filling msg->u.nak) and for unknown or
 * malformed replies; true when the ack body parsed cleanly. */
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	/* Byte 0: reply type in bit 7, original request type in bits 0-6. */
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		/* All NAKs share one layout: GUID, reason code, NAK data. */
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_REMOTE_I2C_WRITE:
		return true; /* since there's nothing to parse */
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	case DP_QUERY_STREAM_ENC_STATUS:
		return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}
  
  static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
  							   struct drm_dp_sideband_msg_req_body *msg)
  {
  	int idx = 1;
  
  	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
  	idx++;
  	if (idx > raw->curlen)
  		goto fail_len;
  
  	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
  	idx += 16;
  	if (idx > raw->curlen)
  		goto fail_len;
  
  	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
  	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
  	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
  	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
  	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
  	idx++;
  	return true;
  fail_len:
  	DRM_DEBUG_KMS("connection status reply parse length fail %d %d
  ", idx, raw->curlen);
  	return false;
  }
  
  static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
  							   struct drm_dp_sideband_msg_req_body *msg)
  {
  	int idx = 1;
  
  	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
  	idx++;
  	if (idx > raw->curlen)
  		goto fail_len;
  
  	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
  	idx += 16;
  	if (idx > raw->curlen)
  		goto fail_len;
  
  	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
  	idx++;
  	return true;
  fail_len:
  	DRM_DEBUG_KMS("resource status reply parse length fail %d %d
  ", idx, raw->curlen);
  	return false;
  }
  
/* Decode an incoming (downstream-originated) sideband request header and
 * dispatch to the matching parser. Only the two hot-(un)plug/resource
 * notifications are handled; anything else is logged and rejected. */
static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	/* Byte 0, bits 0-6: request type. */
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
1140
1141
  static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
  			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1142
1143
1144
1145
1146
1147
1148
1149
1150
  {
  	struct drm_dp_sideband_msg_req_body req;
  
  	req.req_type = DP_REMOTE_DPCD_WRITE;
  	req.u.dpcd_write.port_number = port_num;
  	req.u.dpcd_write.dpcd_address = offset;
  	req.u.dpcd_write.num_bytes = num_bytes;
  	req.u.dpcd_write.bytes = bytes;
  	drm_dp_encode_sideband_req(&req, msg);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1151
  }
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
1152
  static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1153
1154
1155
1156
1157
  {
  	struct drm_dp_sideband_msg_req_body req;
  
  	req.req_type = DP_LINK_ADDRESS;
  	drm_dp_encode_sideband_req(&req, msg);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1158
  }
bdf7e3b78   Maya Rashish   drm/dp_mst: make ...
1159
  static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
f79489074   Sean Paul   drm/dp_mst: Clear...
1160
1161
1162
1163
1164
  {
  	struct drm_dp_sideband_msg_req_body req;
  
  	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
  	drm_dp_encode_sideband_req(&req, msg);
f79489074   Sean Paul   drm/dp_mst: Clear...
1165
  }
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
1166
1167
  static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
  				     int port_num)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1168
1169
1170
1171
1172
1173
1174
1175
1176
  {
  	struct drm_dp_sideband_msg_req_body req;
  
  	req.req_type = DP_ENUM_PATH_RESOURCES;
  	req.u.port_num.port_number = port_num;
  	drm_dp_encode_sideband_req(&req, msg);
  	msg->path_msg = true;
  	return 0;
  }
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
1177
1178
1179
1180
1181
  static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
  				   int port_num,
  				   u8 vcpi, uint16_t pbn,
  				   u8 number_sdp_streams,
  				   u8 *sdp_stream_sink)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1182
1183
  {
  	struct drm_dp_sideband_msg_req_body req;
948de8423   Suraj Upadhyay   drm : Insert blan...
1184

ad7f8a1f9   Dave Airlie   drm/helper: add D...
1185
1186
1187
1188
1189
  	memset(&req, 0, sizeof(req));
  	req.req_type = DP_ALLOCATE_PAYLOAD;
  	req.u.allocate_payload.port_number = port_num;
  	req.u.allocate_payload.vcpi = vcpi;
  	req.u.allocate_payload.pbn = pbn;
ef8f9bea1   Libin Yang   dp/mst: add SDP s...
1190
1191
1192
  	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
  	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
  		   number_sdp_streams);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1193
1194
  	drm_dp_encode_sideband_req(&req, msg);
  	msg->path_msg = true;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1195
  }
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
1196
1197
  static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
  				   int port_num, bool power_up)
0bb9c2b27   Dhinakaran Pandiyan   drm/dp/mst: Sideb...
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
  {
  	struct drm_dp_sideband_msg_req_body req;
  
  	if (power_up)
  		req.req_type = DP_POWER_UP_PHY;
  	else
  		req.req_type = DP_POWER_DOWN_PHY;
  
  	req.u.port_num.port_number = port_num;
  	drm_dp_encode_sideband_req(&req, msg);
  	msg->path_msg = true;
0bb9c2b27   Dhinakaran Pandiyan   drm/dp/mst: Sideb...
1209
  }
e38c298fc   Sean Paul   drm/mst: Add supp...
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
  static int
  build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
  			      u8 *q_id)
  {
  	struct drm_dp_sideband_msg_req_body req;
  
  	req.req_type = DP_QUERY_STREAM_ENC_STATUS;
  	req.u.enc_status.stream_id = stream_id;
  	memcpy(req.u.enc_status.client_id, q_id,
  	       sizeof(req.u.enc_status.client_id));
  	req.u.enc_status.stream_event = 0;
  	req.u.enc_status.valid_stream_event = false;
  	req.u.enc_status.stream_behavior = 0;
  	req.u.enc_status.valid_stream_behavior = false;
  
  	drm_dp_encode_sideband_req(&req, msg);
  	return 0;
  }
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1228
1229
1230
/* Reserve a payload slot and a VCPI id for @vcpi under mgr->payload_lock.
 * Returns the payload id on success or -EINVAL when either bitmap is
 * exhausted. Ids appear to be 1-based: the release path clears bit
 * (slot + 1) and vcpi->vcpi is set to vcpi_ret + 1 — presumably bit 0 of
 * both masks is reserved at init so neither search returns 0; TODO
 * confirm against mgr initialization. */
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	/* First free payload slot. */
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}
	/* First free VCPI; allocated independently of the payload slot. */
	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}
	/* Both lookups succeeded — commit the reservations. */
	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}
  
/* Release the VCPI id @vcpi and any proposed payload slot that still
 * references it. A vcpi of 0 means "never allocated" and is a no-op. */
static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	/* VCPI ids are stored 1-based relative to the mask bit positions. */
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	/* Drop every proposed payload slot still pointing at this VCPI. */
	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}
  
/* Return true once @txmsg has reached a terminal state (reply received
 * or timed out). Used as the wait_event_timeout() condition in
 * drm_dp_mst_wait_tx_reply(); @mgr is unused here. */
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}
  
/* Wait (up to 4 seconds) for @txmsg to reach a terminal state, optionally
 * poll-driving the HPD IRQ handler for sinks whose interrupt pulse can be
 * lost. Returns 0-ish positive on success, -EIO on timeout or a TIMEOUT
 * state, removing the message from the tx queue if it never completed. */
static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	unsigned long wait_timeout = msecs_to_jiffies(4000);
	unsigned long wait_expires = jiffies + wait_timeout;
	int ret;

	for (;;) {
		/*
		 * If the driver provides a way for this, change to
		 * poll-waiting for the MST reply interrupt if we didn't receive
		 * it for 50 msec. This would cater for cases where the HPD
		 * pulse signal got lost somewhere, even though the sink raised
		 * the corresponding MST interrupt correctly. One example is the
		 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
		 * filters out short pulses with a duration less than ~540 usec.
		 *
		 * The poll period is 50 msec to avoid missing an interrupt
		 * after the sink has cleared it (after a 110msec timeout
		 * since it raised the interrupt).
		 */
		ret = wait_event_timeout(mgr->tx_waitq,
					 check_txmsg_state(mgr, txmsg),
					 mgr->cbs->poll_hpd_irq ?
						msecs_to_jiffies(50) :
						wait_timeout);

		/* Done, not pollable, or overall deadline reached. */
		if (ret || !mgr->cbs->poll_hpd_irq ||
		    time_after(jiffies, wait_expires))
			break;

		mgr->cbs->poll_hpd_irq(mgr);
	}

	/* txmsg->state transitions are protected by qlock; take it before
	 * inspecting the final state. */
	mutex_lock(&mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
			list_del(&txmsg->next);
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	/* Let the tx worker make progress on any remaining queued messages. */
	drm_dp_mst_kick_tx(mgr);
	return ret;
}
  
  static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
  {
  	struct drm_dp_mst_branch *mstb;
  
  	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
  	if (!mstb)
  		return NULL;
  
  	mstb->lct = lct;
  	if (lct > 1)
  		memcpy(mstb->rad, rad, lct / 2);
  	INIT_LIST_HEAD(&mstb->ports);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1370
1371
  	kref_init(&mstb->topology_kref);
  	kref_init(&mstb->malloc_kref);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1372
1373
  	return mstb;
  }
91a25e463   Mykola Lysenko   drm/dp/mst: deall...
1374
1375
  static void drm_dp_free_mst_branch_device(struct kref *kref)
  {
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1376
1377
1378
1379
1380
  	struct drm_dp_mst_branch *mstb =
  		container_of(kref, struct drm_dp_mst_branch, malloc_kref);
  
  	if (mstb->port_parent)
  		drm_dp_mst_put_port_malloc(mstb->port_parent);
91a25e463   Mykola Lysenko   drm/dp/mst: deall...
1381
1382
  	kfree(mstb);
  }
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
  /**
   * DOC: Branch device and port refcounting
   *
   * Topology refcount overview
   * ~~~~~~~~~~~~~~~~~~~~~~~~~~
   *
   * The refcounting schemes for &struct drm_dp_mst_branch and &struct
   * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
   * two different kinds of refcounts: topology refcounts, and malloc refcounts.
   *
   * Topology refcounts are not exposed to drivers, and are handled internally
   * by the DP MST helpers. The helpers use them in order to prevent the
   * in-memory topology state from being changed in the middle of critical
   * operations like changing the internal state of payload allocations. This
   * means each branch and port will be considered to be connected to the rest
1e55a53a2   Matt Roper   drm: Trivial comm...
1398
   * of the topology until its topology refcount reaches zero. Additionally,
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
   * for ports this means that their associated &struct drm_connector will stay
   * registered with userspace until the port's refcount reaches 0.
   *
   * Malloc refcount overview
   * ~~~~~~~~~~~~~~~~~~~~~~~~
   *
   * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
   * drm_dp_mst_branch allocated even after all of its topology references have
   * been dropped, so that the driver or MST helpers can safely access each
   * branch's last known state before it was disconnected from the topology.
   * When the malloc refcount of a port or branch reaches 0, the memory
   * allocation containing the &struct drm_dp_mst_branch or &struct
   * drm_dp_mst_port respectively will be freed.
   *
   * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
   * to drivers. As of writing this documentation, there are no drivers that
   * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
   * helpers. Exposing this API to drivers in a race-free manner would take more
   * tweaking of the refcounting scheme, however patches are welcome provided
   * there is a legitimate driver usecase for this.
   *
   * Refcount relationships in a topology
   * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   *
   * Let's take a look at why the relationship between topology and malloc
   * refcounts is designed the way it is.
   *
   * .. kernel-figure:: dp-mst/topology-figure-1.dot
   *
   *    An example of topology and malloc refs in a DP MST topology with two
   *    active payloads. Topology refcount increments are indicated by solid
   *    lines, and malloc refcount increments are indicated by dashed lines.
   *    Each starts from the branch which incremented the refcount, and ends at
   *    the branch to which the refcount belongs to, i.e. the arrow points the
   *    same way as the C pointers used to reference a structure.
   *
   * As you can see in the above figure, every branch increments the topology
1e55a53a2   Matt Roper   drm: Trivial comm...
1436
1437
   * refcount of its children, and increments the malloc refcount of its
   * parent. Additionally, every payload increments the malloc refcount of its
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
   * assigned port by 1.
   *
   * So, what would happen if MSTB #3 from the above figure was unplugged from
   * the system, but the driver hadn't yet removed payload #2 from port #3? The
   * topology would start to look like the figure below.
   *
   * .. kernel-figure:: dp-mst/topology-figure-2.dot
   *
   *    Ports and branch devices which have been released from memory are
   *    colored grey, and references which have been removed are colored red.
   *
   * Whenever a port or branch device's topology refcount reaches zero, it will
   * decrement the topology refcounts of all its children, the malloc refcount
   * of its parent, and finally its own malloc refcount. For MSTB #4 and port
   * #4, this means they both have been disconnected from the topology and freed
   * from memory. But, because payload #2 is still holding a reference to port
1e55a53a2   Matt Roper   drm: Trivial comm...
1454
   * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1455
   * is still accessible from memory. This also means port #3 has not yet
1e55a53a2   Matt Roper   drm: Trivial comm...
1456
   * decremented the malloc refcount of MSTB #3, so its &struct
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
   * drm_dp_mst_branch will also stay allocated in memory until port #3's
   * malloc refcount reaches 0.
   *
   * This relationship is necessary because in order to release payload #2, we
   * need to be able to figure out the last relative of port #3 that's still
   * connected to the topology. In this case, we would travel up the topology as
   * shown below.
   *
   * .. kernel-figure:: dp-mst/topology-figure-3.dot
   *
   * And finally, remove payload #2 by communicating with port #2 through
   * sideband transactions.
   */
  
  /**
   * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
   * device
   * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
   *
   * Increments &drm_dp_mst_branch.malloc_kref. When
   * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
   * will be released and @mstb may no longer be used.
   *
   * See also: drm_dp_mst_put_mstb_malloc()
   */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	/* Log the refcount value after the get, for chasing ref leaks. */
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}
  
  /**
   * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
   * device
   * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
   *
   * Decrements &drm_dp_mst_branch.malloc_kref. When
   * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
   * will be released and @mstb may no longer be used.
   *
   * See also: drm_dp_mst_get_mstb_malloc()
   */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	/* Log before the put: the value the refcount is about to drop to.
	 * Must not touch mstb after kref_put — it may have been freed. */
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}
  
  static void drm_dp_free_mst_port(struct kref *kref)
  {
  	struct drm_dp_mst_port *port =
  		container_of(kref, struct drm_dp_mst_port, malloc_kref);
  
  	drm_dp_mst_put_mstb_malloc(port->parent);
  	kfree(port);
  }
  
  /**
   * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
   * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
   *
   * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
   * reaches 0, the memory allocation for @port will be released and @port may
   * no longer be used.
   *
   * Because @port could potentially be freed at any time by the DP MST helpers
   * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
   * function, drivers that which to make use of &struct drm_dp_mst_port should
   * ensure that they grab at least one main malloc reference to their MST ports
   * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
   * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
   *
   * See also: drm_dp_mst_put_port_malloc()
   */
  void
  drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
  {
  	kref_get(&port->malloc_kref);
  	DRM_DEBUG("port %p (%d)
  ", port, kref_read(&port->malloc_kref));
  }
  EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
  
  /**
   * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
   * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
   *
   * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
   * reaches 0, the memory allocation for @port will be released and @port may
   * no longer be used.
   *
   * See also: drm_dp_mst_get_port_malloc()
   */
  void
  drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
  {
  	DRM_DEBUG("port %p (%d)
  ", port, kref_read(&port->malloc_kref) - 1);
  	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
  }
  EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1562
1563
1564
1565
1566
1567
1568
1569
  #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
  
  #define STACK_DEPTH 8
  
  static noinline void
  __topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
  		    struct drm_dp_mst_topology_ref_history *history,
  		    enum drm_dp_mst_topology_ref_type type)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1570
  {
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1571
1572
1573
1574
1575
  	struct drm_dp_mst_topology_ref_entry *entry = NULL;
  	depot_stack_handle_t backtrace;
  	ulong stack_entries[STACK_DEPTH];
  	uint n;
  	int i;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1576

12a280c72   Lyude Paul   drm/dp_mst: Add t...
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
  	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
  	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
  	if (!backtrace)
  		return;
  
  	/* Try to find an existing entry for this backtrace */
  	for (i = 0; i < history->len; i++) {
  		if (history->entries[i].backtrace == backtrace) {
  			entry = &history->entries[i];
  			break;
  		}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1588
  	}
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
  	/* Otherwise add one */
  	if (!entry) {
  		struct drm_dp_mst_topology_ref_entry *new;
  		int new_len = history->len + 1;
  
  		new = krealloc(history->entries, sizeof(*new) * new_len,
  			       GFP_KERNEL);
  		if (!new)
  			return;
  
  		entry = &new[history->len];
  		history->len = new_len;
  		history->entries = new;
  
  		entry->backtrace = backtrace;
  		entry->type = type;
  		entry->count = 0;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1606
  	}
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
  	entry->count++;
  	entry->ts_nsec = ktime_get_ns();
  }
  
  static int
  topology_ref_history_cmp(const void *a, const void *b)
  {
  	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
  
  	if (entry_a->ts_nsec > entry_b->ts_nsec)
  		return 1;
  	else if (entry_a->ts_nsec < entry_b->ts_nsec)
  		return -1;
  	else
  		return 0;
  }
  
  static inline const char *
  topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
  {
  	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
  		return "get";
  	else
  		return "put";
  }
  
  static void
  __dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
  			    void *ptr, const char *type_str)
  {
  	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
  	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
  	int i;
  
  	if (!buf)
  		return;
  
  	if (!history->len)
  		goto out;
  
  	/* First, sort the list so that it goes from oldest to newest
  	 * reference entry
  	 */
  	sort(history->entries, history->len, sizeof(*history->entries),
  	     topology_ref_history_cmp, NULL);
  
  	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:
  ",
  		   type_str, ptr);
  
  	for (i = 0; i < history->len; i++) {
  		const struct drm_dp_mst_topology_ref_entry *entry =
  			&history->entries[i];
  		ulong *entries;
  		uint nr_entries;
  		u64 ts_nsec = entry->ts_nsec;
b149cbfee   Sean Paul   drm/mst: Fix up u...
1663
  		u32 rem_nsec = do_div(ts_nsec, 1000000000);
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1664
1665
1666
  
  		nr_entries = stack_depot_fetch(entry->backtrace, &entries);
  		stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
b149cbfee   Sean Paul   drm/mst: Fix up u...
1667
1668
  		drm_printf(&p, "  %d %ss (last at %5llu.%06u):
  %s",
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1669
1670
1671
  			   entry->count,
  			   topology_ref_type_to_str(entry->type),
  			   ts_nsec, rem_nsec / 1000, buf);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1672
  	}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1673

12a280c72   Lyude Paul   drm/dp_mst: Add t...
1674
1675
1676
1677
1678
  	/* Now free the history, since this is the only time we expose it */
  	kfree(history->entries);
  out:
  	kfree(buf);
  }
91a25e463   Mykola Lysenko   drm/dp/mst: deall...
1679

12a280c72   Lyude Paul   drm/dp_mst: Add t...
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
/* Dump and free the recorded topology ref history of a branch device. */
static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
				    "MSTB");
}

/* Dump and free the recorded topology ref history of a port. */
static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
	__dump_topology_ref_history(&port->topology_ref_history, port,
				    "Port");
}

/* Record a topology get/put on @mstb with the caller's backtrace. */
static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

/* Record a topology get/put on @port with the caller's backtrace. */
static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

/* Serialize ref-history updates against the concurrent history dump. */
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
/* Ref-history debugging disabled: all bookkeeping compiles to nothing. */
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1731
1732
  static void drm_dp_destroy_mst_branch_device(struct kref *kref)
  {
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1733
1734
1735
  	struct drm_dp_mst_branch *mstb =
  		container_of(kref, struct drm_dp_mst_branch, topology_kref);
  	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1736

12a280c72   Lyude Paul   drm/dp_mst: Add t...
1737
  	drm_dp_mst_dump_mstb_topology_history(mstb);
7cb12d483   Lyude Paul   drm/dp_mst: Destr...
1738
  	INIT_LIST_HEAD(&mstb->destroy_next);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1739

7cb12d483   Lyude Paul   drm/dp_mst: Destr...
1740
1741
1742
1743
1744
1745
1746
  	/*
  	 * This can get called under mgr->mutex, so we need to perform the
  	 * actual destruction of the mstb in another worker
  	 */
  	mutex_lock(&mgr->delayed_destroy_lock);
  	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
  	mutex_unlock(&mgr->delayed_destroy_lock);
72822c3bf   Imre Deak   drm/dp_mst: Fix f...
1747
  	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1748
  }
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1749
1750
  /**
   * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1e55a53a2   Matt Roper   drm: Trivial comm...
1751
   * branch device unless it's zero
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
   * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
   *
   * Attempts to grab a topology reference to @mstb, if it hasn't yet been
   * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
   * reached 0). Holding a topology reference implies that a malloc reference
   * will be held to @mstb as long as the user holds the topology reference.
   *
   * Care should be taken to ensure that the user has at least one malloc
   * reference to @mstb. If you already have a topology reference to @mstb, you
   * should use drm_dp_mst_topology_get_mstb() instead.
   *
   * See also:
   * drm_dp_mst_topology_get_mstb()
   * drm_dp_mst_topology_put_mstb()
   *
   * Returns:
   * * 1: A topology reference was grabbed successfully
   * * 0: @port is no longer in the topology, no reference was grabbed
   */
  static int __must_check
  drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1773
  {
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1774
  	int ret;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1775

12a280c72   Lyude Paul   drm/dp_mst: Add t...
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
  	topology_ref_history_lock(mstb->mgr);
  	ret = kref_get_unless_zero(&mstb->topology_kref);
  	if (ret) {
  		DRM_DEBUG("mstb %p (%d)
  ",
  			  mstb, kref_read(&mstb->topology_kref));
  		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
  	}
  
  	topology_ref_history_unlock(mstb->mgr);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1786
1787
  
  	return ret;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1788
  }
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
  /**
   * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
   * branch device
   * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
   *
   * Increments &drm_dp_mst_branch.topology_refcount without checking whether or
   * not it's already reached 0. This is only valid to use in scenarios where
   * you are already guaranteed to have at least one active topology reference
   * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
   *
   * See also:
   * drm_dp_mst_topology_try_get_mstb()
   * drm_dp_mst_topology_put_mstb()
   */
  static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
  {
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1805
1806
1807
  	topology_ref_history_lock(mstb->mgr);
  
  	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1808
1809
1810
1811
  	WARN_ON(kref_read(&mstb->topology_kref) == 0);
  	kref_get(&mstb->topology_kref);
  	DRM_DEBUG("mstb %p (%d)
  ", mstb, kref_read(&mstb->topology_kref));
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1812
1813
  
  	topology_ref_history_unlock(mstb->mgr);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
  }
  
  /**
   * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
   * device
   * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
   *
   * Releases a topology reference from @mstb by decrementing
   * &drm_dp_mst_branch.topology_kref.
   *
   * See also:
   * drm_dp_mst_topology_try_get_mstb()
   * drm_dp_mst_topology_get_mstb()
   */
  static void
  drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
  {
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1831
  	topology_ref_history_lock(mstb->mgr);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1832
1833
1834
  	DRM_DEBUG("mstb %p (%d)
  ",
  		  mstb, kref_read(&mstb->topology_kref) - 1);
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1835
  	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1836

12a280c72   Lyude Paul   drm/dp_mst: Add t...
1837
  	topology_ref_history_unlock(mstb->mgr);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1838
  	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1839
1840
1841
1842
  }
  
  static void drm_dp_destroy_port(struct kref *kref)
  {
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1843
1844
  	struct drm_dp_mst_port *port =
  		container_of(kref, struct drm_dp_mst_port, topology_kref);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1845
  	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
df4839fdc   Dave Airlie   drm/dp/mst: fixup...
1846

12a280c72   Lyude Paul   drm/dp_mst: Add t...
1847
  	drm_dp_mst_dump_port_topology_history(port);
6b8eeca65   Dave Airlie   drm/dp/mst: close...
1848

d29333cf5   Lyude Paul   drm/dp_mst: Remov...
1849
1850
1851
1852
  	/* There's nothing that needs locking to destroy an input port yet */
  	if (port->input) {
  		drm_dp_mst_put_port_malloc(port);
  		return;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1853
  	}
d29333cf5   Lyude Paul   drm/dp_mst: Remov...
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
  
  	kfree(port->cached_edid);
  
  	/*
  	 * we can't destroy the connector here, as we might be holding the
  	 * mode_config.mutex from an EDID retrieval
  	 */
  	mutex_lock(&mgr->delayed_destroy_lock);
  	list_add(&port->next, &mgr->destroy_port_list);
  	mutex_unlock(&mgr->delayed_destroy_lock);
72822c3bf   Imre Deak   drm/dp_mst: Fix f...
1864
  	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1865
1866
1867
1868
  }
  
  /**
   * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1e55a53a2   Matt Roper   drm: Trivial comm...
1869
   * port unless it's zero
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
   * @port: &struct drm_dp_mst_port to increment the topology refcount of
   *
   * Attempts to grab a topology reference to @port, if it hasn't yet been
   * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
   * 0). Holding a topology reference implies that a malloc reference will be
   * held to @port as long as the user holds the topology reference.
   *
   * Care should be taken to ensure that the user has at least one malloc
   * reference to @port. If you already have a topology reference to @port, you
   * should use drm_dp_mst_topology_get_port() instead.
   *
   * See also:
   * drm_dp_mst_topology_get_port()
   * drm_dp_mst_topology_put_port()
   *
   * Returns:
   * * 1: A topology reference was grabbed successfully
   * * 0: @port is no longer in the topology, no reference was grabbed
   */
  static int __must_check
  drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
  {
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1892
  	int ret;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1893

12a280c72   Lyude Paul   drm/dp_mst: Add t...
1894
1895
1896
1897
1898
1899
1900
1901
  	topology_ref_history_lock(port->mgr);
  	ret = kref_get_unless_zero(&port->topology_kref);
  	if (ret) {
  		DRM_DEBUG("port %p (%d)
  ",
  			  port, kref_read(&port->topology_kref));
  		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
  	}
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1902

12a280c72   Lyude Paul   drm/dp_mst: Add t...
1903
  	topology_ref_history_unlock(port->mgr);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1904
  	return ret;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1905
  }
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
  /**
   * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
   * @port: The &struct drm_dp_mst_port to increment the topology refcount of
   *
   * Increments &drm_dp_mst_port.topology_refcount without checking whether or
   * not it's already reached 0. This is only valid to use in scenarios where
   * you are already guaranteed to have at least one active topology reference
   * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
   *
   * See also:
   * drm_dp_mst_topology_try_get_port()
   * drm_dp_mst_topology_put_port()
   */
  static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
  {
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1921
  	topology_ref_history_lock(port->mgr);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1922
1923
1924
1925
  	WARN_ON(kref_read(&port->topology_kref) == 0);
  	kref_get(&port->topology_kref);
  	DRM_DEBUG("port %p (%d)
  ", port, kref_read(&port->topology_kref));
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1926
1927
1928
  	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
  
  	topology_ref_history_unlock(port->mgr);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
  }
  
  /**
   * drm_dp_mst_topology_put_port() - release a topology reference to a port
   * @port: The &struct drm_dp_mst_port to release the topology reference from
   *
   * Releases a topology reference from @port by decrementing
   * &drm_dp_mst_port.topology_kref.
   *
   * See also:
   * drm_dp_mst_topology_try_get_port()
   * drm_dp_mst_topology_get_port()
   */
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
1942
  static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1943
  {
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1944
  	topology_ref_history_lock(port->mgr);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1945
1946
1947
  	DRM_DEBUG("port %p (%d)
  ",
  		  port, kref_read(&port->topology_kref) - 1);
12a280c72   Lyude Paul   drm/dp_mst: Add t...
1948
1949
1950
  	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
  
  	topology_ref_history_unlock(port->mgr);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1951
  	kref_put(&port->topology_kref, drm_dp_destroy_port);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1952
  }
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
1953
1954
1955
  static struct drm_dp_mst_branch *
  drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
  					      struct drm_dp_mst_branch *to_find)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1956
1957
1958
  {
  	struct drm_dp_mst_port *port;
  	struct drm_dp_mst_branch *rmstb;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1959
1960
  
  	if (to_find == mstb)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1961
  		return mstb;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1962

ad7f8a1f9   Dave Airlie   drm/helper: add D...
1963
1964
  	list_for_each_entry(port, &mstb->ports, next) {
  		if (port->mstb) {
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
1965
1966
  			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
  			    port->mstb, to_find);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1967
1968
1969
1970
1971
1972
  			if (rmstb)
  				return rmstb;
  		}
  	}
  	return NULL;
  }
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
1973
1974
1975
  static struct drm_dp_mst_branch *
  drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
  				       struct drm_dp_mst_branch *mstb)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1976
1977
  {
  	struct drm_dp_mst_branch *rmstb = NULL;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1978

ad7f8a1f9   Dave Airlie   drm/helper: add D...
1979
  	mutex_lock(&mgr->lock);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1980
  	if (mgr->mst_primary) {
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
1981
1982
  		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
  		    mgr->mst_primary, mstb);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1983
1984
1985
1986
  
  		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
  			rmstb = NULL;
  	}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1987
1988
1989
  	mutex_unlock(&mgr->lock);
  	return rmstb;
  }
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1990
1991
1992
  static struct drm_dp_mst_port *
  drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
  					      struct drm_dp_mst_port *to_find)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1993
1994
1995
1996
  {
  	struct drm_dp_mst_port *port, *mport;
  
  	list_for_each_entry(port, &mstb->ports, next) {
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1997
  		if (port == to_find)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
1998
  			return port;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
1999

ad7f8a1f9   Dave Airlie   drm/helper: add D...
2000
  		if (port->mstb) {
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2001
2002
  			mport = drm_dp_mst_topology_get_port_validated_locked(
  			    port->mstb, to_find);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2003
2004
2005
2006
2007
2008
  			if (mport)
  				return mport;
  		}
  	}
  	return NULL;
  }
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
2009
2010
2011
  static struct drm_dp_mst_port *
  drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
  				       struct drm_dp_mst_port *port)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2012
2013
  {
  	struct drm_dp_mst_port *rport = NULL;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2014

ad7f8a1f9   Dave Airlie   drm/helper: add D...
2015
  	mutex_lock(&mgr->lock);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2016
2017
2018
2019
2020
2021
2022
  	if (mgr->mst_primary) {
  		rport = drm_dp_mst_topology_get_port_validated_locked(
  		    mgr->mst_primary, port);
  
  		if (rport && !drm_dp_mst_topology_try_get_port(rport))
  			rport = NULL;
  	}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2023
2024
2025
2026
2027
2028
2029
  	mutex_unlock(&mgr->lock);
  	return rport;
  }
  
  static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
  {
  	struct drm_dp_mst_port *port;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2030
  	int ret;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2031
2032
2033
  
  	list_for_each_entry(port, &mstb->ports, next) {
  		if (port->port_num == port_num) {
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2034
2035
  			ret = drm_dp_mst_topology_try_get_port(port);
  			return ret ? port : NULL;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
  		}
  	}
  
  	return NULL;
  }
  
  /*
   * calculate a new RAD for this MST branch device
   * if parent has an LCT of 2 then it has 1 nibble of RAD,
   * if parent has an LCT of 3 then it has 2 nibbles of RAD,
   */
  static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
  				 u8 *rad)
  {
75af4c8c4   Mykola Lysenko   drm/dp/mst: fix i...
2050
  	int parent_lct = port->parent->lct;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2051
  	int shift = 4;
75af4c8c4   Mykola Lysenko   drm/dp/mst: fix i...
2052
  	int idx = (parent_lct - 1) / 2;
948de8423   Suraj Upadhyay   drm : Insert blan...
2053

75af4c8c4   Mykola Lysenko   drm/dp/mst: fix i...
2054
2055
2056
  	if (parent_lct > 1) {
  		memcpy(rad, port->parent->rad, idx + 1);
  		shift = (parent_lct % 2) ? 4 : 0;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2057
2058
2059
2060
  	} else
  		rad[0] = 0;
  
  	rad[idx] |= port->port_num << shift;
75af4c8c4   Mykola Lysenko   drm/dp/mst: fix i...
2061
  	return parent_lct + 1;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2062
  }
b2feb1d6d   Lyude Paul   drm/dp_mst: Renam...
2063
  static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
  {
  	switch (pdt) {
  	case DP_PEER_DEVICE_DP_LEGACY_CONV:
  	case DP_PEER_DEVICE_SST_SINK:
  		return true;
  	case DP_PEER_DEVICE_MST_BRANCHING:
  		/* For sst branch device */
  		if (!mcs)
  			return true;
  
  		return false;
  	}
  	return true;
  }
  
  static int
  drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
  		    bool new_mcs)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2082
  {
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2083
2084
2085
2086
  	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
  	struct drm_dp_mst_branch *mstb;
  	u8 rad[8], lct;
  	int ret = 0;
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2087
  	if (port->pdt == new_pdt && port->mcs == new_mcs)
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2088
2089
2090
  		return 0;
  
  	/* Teardown the old pdt, if there is one */
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2091
  	if (port->pdt != DP_PEER_DEVICE_NONE) {
b2feb1d6d   Lyude Paul   drm/dp_mst: Renam...
2092
  		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2093
2094
2095
2096
2097
  			/*
  			 * If the new PDT would also have an i2c bus,
  			 * don't bother with reregistering it
  			 */
  			if (new_pdt != DP_PEER_DEVICE_NONE &&
b2feb1d6d   Lyude Paul   drm/dp_mst: Renam...
2098
  			    drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2099
2100
2101
2102
  				port->pdt = new_pdt;
  				port->mcs = new_mcs;
  				return 0;
  			}
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2103

db1a07956   Wayne Lin   drm/dp_mst: Handl...
2104
  			/* remove i2c over sideband */
d8bd15b37   Imre Deak   drm/dp_mst: Fix t...
2105
  			drm_dp_mst_unregister_i2c_bus(port);
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2106
2107
2108
2109
2110
2111
  		} else {
  			mutex_lock(&mgr->lock);
  			drm_dp_mst_topology_put_mstb(port->mstb);
  			port->mstb = NULL;
  			mutex_unlock(&mgr->lock);
  		}
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2112
2113
2114
  	}
  
  	port->pdt = new_pdt;
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2115
  	port->mcs = new_mcs;
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2116

db1a07956   Wayne Lin   drm/dp_mst: Handl...
2117
  	if (port->pdt != DP_PEER_DEVICE_NONE) {
b2feb1d6d   Lyude Paul   drm/dp_mst: Renam...
2118
  		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2119
  			/* add i2c over sideband */
d8bd15b37   Imre Deak   drm/dp_mst: Fix t...
2120
  			ret = drm_dp_mst_register_i2c_bus(port);
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2121
2122
2123
2124
2125
2126
2127
2128
2129
  		} else {
  			lct = drm_dp_calculate_rad(port, rad);
  			mstb = drm_dp_add_mst_branch_device(lct, rad);
  			if (!mstb) {
  				ret = -ENOMEM;
  				DRM_ERROR("Failed to create MSTB for port %p",
  					  port);
  				goto out;
  			}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2130

db1a07956   Wayne Lin   drm/dp_mst: Handl...
2131
2132
2133
2134
  			mutex_lock(&mgr->lock);
  			port->mstb = mstb;
  			mstb->mgr = port->mgr;
  			mstb->port_parent = port;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2135

db1a07956   Wayne Lin   drm/dp_mst: Handl...
2136
2137
2138
2139
2140
2141
  			/*
  			 * Make sure this port's memory allocation stays
  			 * around until its child MSTB releases it
  			 */
  			drm_dp_mst_get_port_malloc(port);
  			mutex_unlock(&mgr->lock);
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2142

db1a07956   Wayne Lin   drm/dp_mst: Handl...
2143
2144
2145
  			/* And make sure we send a link address for this */
  			ret = 1;
  		}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2146
  	}
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2147
2148
2149
2150
2151
  
  out:
  	if (ret < 0)
  		port->pdt = DP_PEER_DEVICE_NONE;
  	return ret;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2152
  }
562836a26   Ville Syrjälä   drm/dp_mst: Enabl...
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
  /**
   * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
   * @aux: Fake sideband AUX CH
   * @offset: address of the (first) register to read
   * @buffer: buffer to store the register values
   * @size: number of bytes in @buffer
   *
   * Performs the same functionality for remote devices via
   * sideband messaging as drm_dp_dpcd_read() does for local
   * devices via actual AUX CH.
   *
   * Return: Number of bytes read, or negative error code on failure.
   */
  ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
  			     unsigned int offset, void *buffer, size_t size)
  {
  	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
  						    aux);
  
  	return drm_dp_send_dpcd_read(port->mgr, port,
  				     offset, size, buffer);
  }
  
  /**
   * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
   * @aux: Fake sideband AUX CH
   * @offset: address of the (first) register to write
   * @buffer: buffer containing the values to write
   * @size: number of bytes in @buffer
   *
   * Performs the same functionality for remote devices via
   * sideband messaging as drm_dp_dpcd_write() does for local
   * devices via actual AUX CH.
   *
4c0a9b62b   Lyude Paul   drm/dp_mst: Make ...
2187
   * Return: number of bytes written on success, negative error code on failure.
562836a26   Ville Syrjälä   drm/dp_mst: Enabl...
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
   */
  ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
  			      unsigned int offset, void *buffer, size_t size)
  {
  	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
  						    aux);
  
  	return drm_dp_send_dpcd_write(port->mgr, port,
  				      offset, size, buffer);
  }
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
2198
  static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2199
  {
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
2200
  	int ret = 0;
5e93b8208   Hersen Wu   drm/dp/mst: move ...
2201
2202
2203
2204
2205
  
  	memcpy(mstb->guid, guid, 16);
  
  	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
  		if (mstb->port_parent) {
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
2206
2207
2208
  			ret = drm_dp_send_dpcd_write(mstb->mgr,
  						     mstb->port_parent,
  						     DP_GUID, 16, mstb->guid);
5e93b8208   Hersen Wu   drm/dp/mst: move ...
2209
  		} else {
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
2210
2211
  			ret = drm_dp_dpcd_write(mstb->mgr->aux,
  						DP_GUID, mstb->guid, 16);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2212
2213
  		}
  	}
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
2214

94b6ada40   Lyude Paul   drm/dp_mst: Fix d...
2215
2216
2217
2218
  	if (ret < 16 && ret > 0)
  		return -EPROTO;
  
  	return ret == 16 ? 0 : ret;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2219
  }
1c960876b   Dave Airlie   drm/dp/mst: don't...
2220
2221
  static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
  				int pnum,
d87af4d10   Rickard Strandqvist   gpu: drm: drm_dp_...
2222
2223
  				char *proppath,
  				size_t proppath_size)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2224
2225
2226
  {
  	int i;
  	char temp[8];
948de8423   Suraj Upadhyay   drm : Insert blan...
2227

d87af4d10   Rickard Strandqvist   gpu: drm: drm_dp_...
2228
  	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2229
2230
  	for (i = 0; i < (mstb->lct - 1); i++) {
  		int shift = (i % 2) ? 0 : 4;
7a11a334a   Mykola Lysenko   drm/dp/mst: fix i...
2231
  		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
948de8423   Suraj Upadhyay   drm : Insert blan...
2232

d87af4d10   Rickard Strandqvist   gpu: drm: drm_dp_...
2233
2234
  		snprintf(temp, sizeof(temp), "-%d", port_num);
  		strlcat(proppath, temp, proppath_size);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2235
  	}
1c960876b   Dave Airlie   drm/dp/mst: don't...
2236
  	snprintf(temp, sizeof(temp), "-%d", pnum);
d87af4d10   Rickard Strandqvist   gpu: drm: drm_dp_...
2237
  	strlcat(proppath, temp, proppath_size);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2238
  }
562836a26   Ville Syrjälä   drm/dp_mst: Enabl...
2239
2240
  /**
   * drm_dp_mst_connector_late_register() - Late MST connector registration
63b87c310   Sean Paul   drm/mst: Fix sphi...
2241
   * @connector: The MST connector
562836a26   Ville Syrjälä   drm/dp_mst: Enabl...
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
   * @port: The MST port for this connector
   *
   * Helper to register the remote aux device for this MST port. Drivers should
   * call this from their mst connector's late_register hook to enable MST aux
   * devices.
   *
   * Return: 0 on success, negative error code on failure.
   */
  int drm_dp_mst_connector_late_register(struct drm_connector *connector,
  				       struct drm_dp_mst_port *port)
  {
  	DRM_DEBUG_KMS("registering %s remote bus for %s
  ",
  		      port->aux.name, connector->kdev->kobj.name);
  
  	port->aux.dev = connector->kdev;
  	return drm_dp_aux_register_devnode(&port->aux);
  }
  EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
  
  /**
   * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
63b87c310   Sean Paul   drm/mst: Fix sphi...
2264
   * @connector: The MST connector
562836a26   Ville Syrjälä   drm/dp_mst: Enabl...
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
   * @port: The MST port for this connector
   *
   * Helper to unregister the remote aux device for this MST port, registered by
   * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
   * connector's early_unregister hook.
   */
  void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
  					   struct drm_dp_mst_port *port)
  {
  	DRM_DEBUG_KMS("unregistering %s remote bus for %s
  ",
  		      port->aux.name, connector->kdev->kobj.name);
  	drm_dp_aux_unregister_devnode(&port->aux);
  }
  EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
e2839ff69   Lyude Paul   drm/dp_mst: Renam...
2280
  static void
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
  drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
  			      struct drm_dp_mst_port *port)
  {
  	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
  	char proppath[255];
  	int ret;
  
  	build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
  	port->connector = mgr->cbs->add_connector(mgr, port, proppath);
  	if (!port->connector) {
  		ret = -ENOMEM;
  		goto error;
  	}
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2294
  	if (port->pdt != DP_PEER_DEVICE_NONE &&
b2feb1d6d   Lyude Paul   drm/dp_mst: Renam...
2295
  	    drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2296
2297
2298
2299
  		port->cached_edid = drm_get_edid(port->connector,
  						 &port->aux.ddc);
  		drm_connector_set_tile_property(port->connector);
  	}
f8d97d98e   Pankaj Bharadiya   drm: Register con...
2300
  	drm_connector_register(port->connector);
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
  	return;
  
  error:
  	DRM_ERROR("Failed to create connector for port %p: %d
  ", port, ret);
  }
  
  /*
   * Drop a topology reference, and unlink the port from the in-memory topology
   * layout
   */
  static void
  drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
  				struct drm_dp_mst_port *port)
  {
  	mutex_lock(&mgr->lock);
b1dee9a71   David Francis   drm/dp_mst: Fill ...
2317
  	port->parent->num_ports--;
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
  	list_del(&port->next);
  	mutex_unlock(&mgr->lock);
  	drm_dp_mst_topology_put_port(port);
  }
  
  static struct drm_dp_mst_port *
  drm_dp_mst_add_port(struct drm_device *dev,
  		    struct drm_dp_mst_topology_mgr *mgr,
  		    struct drm_dp_mst_branch *mstb, u8 port_number)
  {
  	struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
  
  	if (!port)
  		return NULL;
  
  	kref_init(&port->topology_kref);
  	kref_init(&port->malloc_kref);
  	port->parent = mstb;
  	port->port_num = port_number;
  	port->mgr = mgr;
  	port->aux.name = "DPMST";
  	port->aux.dev = dev->dev;
  	port->aux.is_remote = true;
c908b1c4b   David (Dingchen) Zhang   drm: add dp helpe...
2341
2342
  	/* initialize the MST downstream port's AUX crc work queue */
  	drm_dp_remote_aux_init(&port->aux);
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2343
2344
2345
2346
2347
2348
2349
2350
  	/*
  	 * Make sure the memory allocation for our parent branch stays
  	 * around until our own memory allocation is released
  	 */
  	drm_dp_mst_get_mstb_malloc(mstb);
  
  	return port;
  }
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2351
  static int
e2839ff69   Lyude Paul   drm/dp_mst: Renam...
2352
2353
2354
  drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
  				    struct drm_device *dev,
  				    struct drm_dp_link_addr_reply_port *port_msg)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2355
  {
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2356
  	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2357
  	struct drm_dp_mst_port *port;
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2358
2359
  	int old_ddps = 0, ret;
  	u8 new_pdt = DP_PEER_DEVICE_NONE;
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2360
  	bool new_mcs = 0;
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2361
  	bool created = false, send_link_addr = false, changed = false;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2362

ad7f8a1f9   Dave Airlie   drm/helper: add D...
2363
2364
  	port = drm_dp_get_port(mstb, port_msg->port_number);
  	if (!port) {
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2365
2366
  		port = drm_dp_mst_add_port(dev, mgr, mstb,
  					   port_msg->port_number);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2367
  		if (!port)
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2368
  			return -ENOMEM;
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2369
  		created = true;
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2370
2371
2372
2373
  		changed = true;
  	} else if (!port->input && port_msg->input_port && port->connector) {
  		/* Since port->connector can't be changed here, we create a
  		 * new port if input_port changes from 0 to 1
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2374
  		 */
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2375
2376
  		drm_dp_mst_topology_unlink_port(mgr, port);
  		drm_dp_mst_topology_put_port(port);
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2377
2378
2379
  		port = drm_dp_mst_add_port(dev, mgr, mstb,
  					   port_msg->port_number);
  		if (!port)
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2380
2381
  			return -ENOMEM;
  		changed = true;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2382
  		created = true;
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2383
2384
2385
2386
2387
  	} else if (port->input && !port_msg->input_port) {
  		changed = true;
  	} else if (port->connector) {
  		/* We're updating a port that's exposed to userspace, so do it
  		 * under lock
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2388
2389
  		 */
  		drm_modeset_lock(&mgr->base.lock, NULL);
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2390

ad7f8a1f9   Dave Airlie   drm/helper: add D...
2391
  		old_ddps = port->ddps;
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2392
2393
2394
2395
2396
2397
2398
2399
  		changed = port->ddps != port_msg->ddps ||
  			(port->ddps &&
  			 (port->ldps != port_msg->legacy_device_plug_status ||
  			  port->dpcd_rev != port_msg->dpcd_revision ||
  			  port->mcs != port_msg->mcs ||
  			  port->pdt != port_msg->peer_device_type ||
  			  port->num_sdp_stream_sinks !=
  			  port_msg->num_sdp_stream_sinks));
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2400
  	}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2401
  	port->input = port_msg->input_port;
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2402
2403
  	if (!port->input)
  		new_pdt = port_msg->peer_device_type;
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2404
  	new_mcs = port_msg->mcs;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2405
2406
2407
2408
2409
  	port->ddps = port_msg->ddps;
  	port->ldps = port_msg->legacy_device_plug_status;
  	port->dpcd_rev = port_msg->dpcd_revision;
  	port->num_sdp_streams = port_msg->num_sdp_streams;
  	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2410
2411
2412
2413
  
  	/* manage mstb port lists with mgr lock - take a reference
  	   for this list */
  	if (created) {
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2414
  		mutex_lock(&mgr->lock);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2415
  		drm_dp_mst_topology_get_port(port);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2416
  		list_add(&port->next, &mstb->ports);
b1dee9a71   David Francis   drm/dp_mst: Fill ...
2417
  		mstb->num_ports++;
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2418
  		mutex_unlock(&mgr->lock);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2419
  	}
87212b51b   Lyude Paul   drm/dp_mst: Repro...
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
  	/*
  	 * Reprobe PBN caps on both hotplug, and when re-probing the link
  	 * for our parent mstb
  	 */
  	if (old_ddps != port->ddps || !created) {
  		if (port->ddps && !port->input) {
  			ret = drm_dp_send_enum_path_resources(mgr, mstb,
  							      port);
  			if (ret == 1)
  				changed = true;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2430
  		} else {
fcf463807   Lyude Paul   drm/dp_mst: Use f...
2431
  			port->full_pbn = 0;
3d76df632   Lyude Paul   drm/dp_mst: Fix s...
2432
  		}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2433
  	}
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2434
  	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2435
2436
2437
2438
2439
2440
2441
  	if (ret == 1) {
  		send_link_addr = true;
  	} else if (ret < 0) {
  		DRM_ERROR("Failed to change PDT on port %p: %d
  ",
  			  port, ret);
  		goto fail;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2442
  	}
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2443
2444
2445
2446
2447
  	/*
  	 * If this port wasn't just created, then we're reprobing because
  	 * we're coming out of suspend. In this case, always resend the link
  	 * address if there's an MSTB on this port
  	 */
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2448
2449
  	if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
  	    port->mcs)
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2450
2451
2452
  		send_link_addr = true;
  
  	if (port->connector)
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2453
  		drm_modeset_unlock(&mgr->base.lock);
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2454
  	else if (!port->input)
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2455
  		drm_dp_mst_port_add_connector(mstb, port);
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2456

6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2457
2458
2459
2460
2461
2462
  	if (send_link_addr && port->mstb) {
  		ret = drm_dp_send_link_address(mgr, port->mstb);
  		if (ret == 1) /* MSTB below us changed */
  			changed = true;
  		else if (ret < 0)
  			goto fail_put;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2463
  	}
8ae22cb41   Dave Airlie   Revert "drm/dp/ms...
2464

ad7f8a1f9   Dave Airlie   drm/helper: add D...
2465
  	/* put reference to this port */
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
2466
  	drm_dp_mst_topology_put_port(port);
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2467
  	return changed;
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2468
2469
  
  fail:
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2470
  	drm_dp_mst_topology_unlink_port(mgr, port);
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2471
  	if (port->connector)
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2472
  		drm_modeset_unlock(&mgr->base.lock);
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2473
2474
2475
  fail_put:
  	drm_dp_mst_topology_put_port(port);
  	return ret;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2476
  }
e2839ff69   Lyude Paul   drm/dp_mst: Renam...
2477
2478
2479
  static void
  drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
  			    struct drm_dp_connection_status_notify *conn_stat)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2480
  {
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2481
  	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2482
  	struct drm_dp_mst_port *port;
7617e9621   Wayne Lin   drm/dp_mst: clear...
2483
  	int old_ddps, old_input, ret, i;
dad7d84f8   Lyude Paul   drm/dp_mst: Don't...
2484
  	u8 new_pdt;
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2485
  	bool new_mcs;
dad7d84f8   Lyude Paul   drm/dp_mst: Don't...
2486
  	bool dowork = false, create_connector = false;
c485e2c97   Lyude Paul   drm/dp_mst: Refac...
2487

ad7f8a1f9   Dave Airlie   drm/helper: add D...
2488
2489
2490
  	port = drm_dp_get_port(mstb, conn_stat->port_number);
  	if (!port)
  		return;
dad7d84f8   Lyude Paul   drm/dp_mst: Don't...
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
  	if (port->connector) {
  		if (!port->input && conn_stat->input_port) {
  			/*
  			 * We can't remove a connector from an already exposed
  			 * port, so just throw the port out and make sure we
  			 * reprobe the link address of it's parent MSTB
  			 */
  			drm_dp_mst_topology_unlink_port(mgr, port);
  			mstb->link_address_sent = false;
  			dowork = true;
  			goto out;
  		}
  
  		/* Locking is only needed if the port's exposed to userspace */
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2505
  		drm_modeset_lock(&mgr->base.lock, NULL);
dad7d84f8   Lyude Paul   drm/dp_mst: Don't...
2506
2507
2508
2509
2510
2511
  	} else if (port->input && !conn_stat->input_port) {
  		create_connector = true;
  		/* Reprobe link address so we get num_sdp_streams */
  		mstb->link_address_sent = false;
  		dowork = true;
  	}
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2512

ad7f8a1f9   Dave Airlie   drm/helper: add D...
2513
  	old_ddps = port->ddps;
7617e9621   Wayne Lin   drm/dp_mst: clear...
2514
  	old_input = port->input;
dad7d84f8   Lyude Paul   drm/dp_mst: Don't...
2515
  	port->input = conn_stat->input_port;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2516
2517
2518
2519
  	port->ldps = conn_stat->legacy_device_plug_status;
  	port->ddps = conn_stat->displayport_device_plug_status;
  
  	if (old_ddps != port->ddps) {
87212b51b   Lyude Paul   drm/dp_mst: Repro...
2520
2521
2522
  		if (port->ddps && !port->input)
  			drm_dp_send_enum_path_resources(mgr, mstb, port);
  		else
fcf463807   Lyude Paul   drm/dp_mst: Use f...
2523
  			port->full_pbn = 0;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2524
  	}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2525

dad7d84f8   Lyude Paul   drm/dp_mst: Don't...
2526
  	new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
db1a07956   Wayne Lin   drm/dp_mst: Handl...
2527
2528
  	new_mcs = conn_stat->message_capability_status;
  	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
dad7d84f8   Lyude Paul   drm/dp_mst: Don't...
2529
2530
2531
2532
2533
2534
2535
  	if (ret == 1) {
  		dowork = true;
  	} else if (ret < 0) {
  		DRM_ERROR("Failed to change PDT for port %p: %d
  ",
  			  port, ret);
  		dowork = false;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2536
  	}
7617e9621   Wayne Lin   drm/dp_mst: clear...
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
  	if (!old_input && old_ddps != port->ddps && !port->ddps) {
  		for (i = 0; i < mgr->max_payloads; i++) {
  			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
  			struct drm_dp_mst_port *port_validated;
  
  			if (!vcpi)
  				continue;
  
  			port_validated =
  				container_of(vcpi, struct drm_dp_mst_port, vcpi);
  			port_validated =
  				drm_dp_mst_topology_get_port_validated(mgr, port_validated);
  			if (!port_validated) {
  				mutex_lock(&mgr->payload_lock);
  				vcpi->num_slots = 0;
  				mutex_unlock(&mgr->payload_lock);
  			} else {
  				drm_dp_mst_topology_put_port(port_validated);
  			}
  		}
  	}
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2558
2559
  	if (port->connector)
  		drm_modeset_unlock(&mgr->base.lock);
dad7d84f8   Lyude Paul   drm/dp_mst: Don't...
2560
2561
  	else if (create_connector)
  		drm_dp_mst_port_add_connector(mstb, port);
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2562

dad7d84f8   Lyude Paul   drm/dp_mst: Don't...
2563
  out:
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
2564
  	drm_dp_mst_topology_put_port(port);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2565
2566
  	if (dowork)
  		queue_work(system_long_wq, &mstb->mgr->work);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2567
2568
2569
2570
2571
2572
2573
  }
  
  static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
  							       u8 lct, u8 *rad)
  {
  	struct drm_dp_mst_branch *mstb;
  	struct drm_dp_mst_port *port;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2574
  	int i, ret;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2575
  	/* find the port by iterating down */
9eb1e57f5   Dave Airlie   drm/dp/mst: take ...
2576
2577
  
  	mutex_lock(&mgr->lock);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2578
  	mstb = mgr->mst_primary;
23d800390   Stanislav Lisovskiy   drm/dp_mst: Check...
2579
2580
  	if (!mstb)
  		goto out;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2581
2582
  	for (i = 0; i < lct - 1; i++) {
  		int shift = (i % 2) ? 0 : 4;
7a11a334a   Mykola Lysenko   drm/dp/mst: fix i...
2583
  		int port_num = (rad[i / 2] >> shift) & 0xf;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2584
2585
2586
  
  		list_for_each_entry(port, &mstb->ports, next) {
  			if (port->port_num == port_num) {
30730c7f5   Adam Richter   drm: fix mutex le...
2587
2588
  				mstb = port->mstb;
  				if (!mstb) {
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2589
2590
  					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x
  ", lct, rad[0]);
30730c7f5   Adam Richter   drm: fix mutex le...
2591
  					goto out;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2592
  				}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2593
2594
2595
2596
  				break;
  			}
  		}
  	}
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2597
2598
2599
  	ret = drm_dp_mst_topology_try_get_mstb(mstb);
  	if (!ret)
  		mstb = NULL;
30730c7f5   Adam Richter   drm: fix mutex le...
2600
  out:
9eb1e57f5   Dave Airlie   drm/dp/mst: take ...
2601
  	mutex_unlock(&mgr->lock);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2602
2603
  	return mstb;
  }
bd9343208   Mykola Lysenko   drm/dp/mst: proce...
2604
2605
  static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
  	struct drm_dp_mst_branch *mstb,
fde61a7a7   Lyude Paul   drm/dp_mst: Const...
2606
  	const uint8_t *guid)
bd9343208   Mykola Lysenko   drm/dp/mst: proce...
2607
2608
2609
  {
  	struct drm_dp_mst_branch *found_mstb;
  	struct drm_dp_mst_port *port;
5e93b8208   Hersen Wu   drm/dp/mst: move ...
2610
2611
  	if (memcmp(mstb->guid, guid, 16) == 0)
  		return mstb;
bd9343208   Mykola Lysenko   drm/dp/mst: proce...
2612
2613
2614
  	list_for_each_entry(port, &mstb->ports, next) {
  		if (!port->mstb)
  			continue;
bd9343208   Mykola Lysenko   drm/dp/mst: proce...
2615
2616
2617
2618
2619
2620
2621
2622
  		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
  
  		if (found_mstb)
  			return found_mstb;
  	}
  
  	return NULL;
  }
de6d68182   Lyude Paul   drm/dp_mst: Fix s...
2623
2624
  static struct drm_dp_mst_branch *
  drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
fde61a7a7   Lyude Paul   drm/dp_mst: Const...
2625
  				     const uint8_t *guid)
bd9343208   Mykola Lysenko   drm/dp/mst: proce...
2626
2627
  {
  	struct drm_dp_mst_branch *mstb;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2628
  	int ret;
bd9343208   Mykola Lysenko   drm/dp/mst: proce...
2629
2630
2631
  
  	/* find the port by iterating down */
  	mutex_lock(&mgr->lock);
5e93b8208   Hersen Wu   drm/dp/mst: move ...
2632
  	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2633
2634
2635
2636
2637
  	if (mstb) {
  		ret = drm_dp_mst_topology_try_get_mstb(mstb);
  		if (!ret)
  			mstb = NULL;
  	}
bd9343208   Mykola Lysenko   drm/dp/mst: proce...
2638
2639
2640
2641
  
  	mutex_unlock(&mgr->lock);
  	return mstb;
  }
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2642
  static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2643
2644
2645
  					       struct drm_dp_mst_branch *mstb)
  {
  	struct drm_dp_mst_port *port;
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2646
2647
  	int ret;
  	bool changed = false;
14692a363   Lyude Paul   drm/dp_mst: Add p...
2648

6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2649
2650
2651
2652
2653
2654
2655
  	if (!mstb->link_address_sent) {
  		ret = drm_dp_send_link_address(mgr, mstb);
  		if (ret == 1)
  			changed = true;
  		else if (ret < 0)
  			return ret;
  	}
68d8c9fc9   Dave Airlie   drm/dp/mst: updat...
2656

ad7f8a1f9   Dave Airlie   drm/helper: add D...
2657
  	list_for_each_entry(port, &mstb->ports, next) {
14692a363   Lyude Paul   drm/dp_mst: Add p...
2658
  		struct drm_dp_mst_branch *mstb_child = NULL;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2659

14692a363   Lyude Paul   drm/dp_mst: Add p...
2660
  		if (port->input || !port->ddps)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2661
  			continue;
14692a363   Lyude Paul   drm/dp_mst: Add p...
2662
  		if (port->mstb)
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
2663
2664
  			mstb_child = drm_dp_mst_topology_get_mstb_validated(
  			    mgr, port->mstb);
14692a363   Lyude Paul   drm/dp_mst: Add p...
2665
2666
  
  		if (mstb_child) {
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2667
2668
  			ret = drm_dp_check_and_send_link_address(mgr,
  								 mstb_child);
14692a363   Lyude Paul   drm/dp_mst: Add p...
2669
  			drm_dp_mst_topology_put_mstb(mstb_child);
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2670
2671
2672
2673
  			if (ret == 1)
  				changed = true;
  			else if (ret < 0)
  				return ret;
9254ec496   Daniel Vetter   drm/dp/mst: make ...
2674
  		}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2675
  	}
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2676
2677
  
  	return changed;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2678
2679
2680
2681
  }
  
  static void drm_dp_mst_link_probe_work(struct work_struct *work)
  {
14692a363   Lyude Paul   drm/dp_mst: Add p...
2682
2683
2684
  	struct drm_dp_mst_topology_mgr *mgr =
  		container_of(work, struct drm_dp_mst_topology_mgr, work);
  	struct drm_device *dev = mgr->dev;
9254ec496   Daniel Vetter   drm/dp/mst: make ...
2685
  	struct drm_dp_mst_branch *mstb;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2686
  	int ret;
f79489074   Sean Paul   drm/dp_mst: Clear...
2687
  	bool clear_payload_id_table;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2688

14692a363   Lyude Paul   drm/dp_mst: Add p...
2689
  	mutex_lock(&mgr->probe_lock);
9254ec496   Daniel Vetter   drm/dp/mst: make ...
2690
  	mutex_lock(&mgr->lock);
f79489074   Sean Paul   drm/dp_mst: Clear...
2691
2692
  	clear_payload_id_table = !mgr->payload_id_table_cleared;
  	mgr->payload_id_table_cleared = true;
9254ec496   Daniel Vetter   drm/dp/mst: make ...
2693
2694
  	mstb = mgr->mst_primary;
  	if (mstb) {
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
2695
2696
2697
  		ret = drm_dp_mst_topology_try_get_mstb(mstb);
  		if (!ret)
  			mstb = NULL;
9254ec496   Daniel Vetter   drm/dp/mst: make ...
2698
2699
  	}
  	mutex_unlock(&mgr->lock);
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2700
2701
2702
  	if (!mstb) {
  		mutex_unlock(&mgr->probe_lock);
  		return;
9254ec496   Daniel Vetter   drm/dp/mst: make ...
2703
  	}
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2704

f79489074   Sean Paul   drm/dp_mst: Clear...
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
  	/*
  	 * Certain branch devices seem to incorrectly report an available_pbn
  	 * of 0 on downstream sinks, even after clearing the
  	 * DP_PAYLOAD_ALLOCATE_* registers in
  	 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
  	 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
  	 * things work again.
  	 */
  	if (clear_payload_id_table) {
  		DRM_DEBUG_KMS("Clearing payload ID table
  ");
  		drm_dp_send_clear_payload_id_table(mgr, mstb);
9254ec496   Daniel Vetter   drm/dp/mst: make ...
2717
  	}
f79489074   Sean Paul   drm/dp_mst: Clear...
2718

6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2719
  	ret = drm_dp_check_and_send_link_address(mgr, mstb);
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
2720
  	drm_dp_mst_topology_put_mstb(mstb);
14692a363   Lyude Paul   drm/dp_mst: Add p...
2721
  	mutex_unlock(&mgr->probe_lock);
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2722
2723
  	if (ret)
  		drm_kms_helper_hotplug_event(dev);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2724
2725
2726
2727
2728
  }
  
  static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
  				 u8 *guid)
  {
e38e12895   Ville Syrjälä   drm/dp/mst: Use m...
2729
  	u64 salt;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2730

e38e12895   Ville Syrjälä   drm/dp/mst: Use m...
2731
2732
2733
2734
2735
2736
2737
2738
2739
  	if (memchr_inv(guid, 0, 16))
  		return true;
  
  	salt = get_jiffies_64();
  
  	memcpy(&guid[0], &salt, sizeof(u64));
  	memcpy(&guid[8], &salt, sizeof(u64));
  
  	return false;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2740
  }
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
2741
2742
  static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
  			    u8 port_num, u32 offset, u8 num_bytes)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2743
2744
2745
2746
2747
2748
2749
2750
  {
  	struct drm_dp_sideband_msg_req_body req;
  
  	req.req_type = DP_REMOTE_DPCD_READ;
  	req.u.dpcd_read.port_number = port_num;
  	req.u.dpcd_read.dpcd_address = offset;
  	req.u.dpcd_read.num_bytes = num_bytes;
  	drm_dp_encode_sideband_req(&req, msg);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2751
  }
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
  
  static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
  				    bool up, u8 *msg, int len)
  {
  	int ret;
  	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
  	int tosend, total, offset;
  	int retries = 0;
  
  retry:
  	total = len;
  	offset = 0;
  	do {
  		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
  
  		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
  					&msg[offset],
  					tosend);
  		if (ret != tosend) {
  			if (ret == -EIO && retries < 5) {
  				retries++;
  				goto retry;
  			}
  			DRM_DEBUG_KMS("failed to dpcd write %d %d
  ", tosend, ret);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
  
  			return -EIO;
  		}
  		offset += tosend;
  		total -= tosend;
  	} while (total > 0);
  	return 0;
  }
  
  static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
  				  struct drm_dp_sideband_msg_tx *txmsg)
  {
  	struct drm_dp_mst_branch *mstb = txmsg->dst;
bd9343208   Mykola Lysenko   drm/dp/mst: proce...
2790
  	u8 req_type;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2791

bd9343208   Mykola Lysenko   drm/dp/mst: proce...
2792
2793
2794
2795
2796
2797
  	req_type = txmsg->msg[0] & 0x7f;
  	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
  		req_type == DP_RESOURCE_STATUS_NOTIFY)
  		hdr->broadcast = 1;
  	else
  		hdr->broadcast = 0;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2798
2799
2800
2801
2802
  	hdr->path_msg = txmsg->path_msg;
  	hdr->lct = mstb->lct;
  	hdr->lcr = mstb->lct - 1;
  	if (mstb->lct > 1)
  		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
2803

ad7f8a1f9   Dave Airlie   drm/helper: add D...
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
  	return 0;
  }
  /*
   * process a single block of the next message in the sideband queue
   */
  static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
  				   struct drm_dp_sideband_msg_tx *txmsg,
  				   bool up)
  {
  	u8 chunk[48];
  	struct drm_dp_sideband_msg_hdr hdr;
  	int len, space, idx, tosend;
  	int ret;
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
2817
2818
  	if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
  		return 0;
bf3719c04   Damien Lespiau   drm/dp-mst-helper...
2819
  	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
2820
  	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2821
  		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2822

d308a881a   Lyude Paul   drm/dp_mst: Kill ...
2823
  	/* make hdr from dst mst */
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
  	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
  	if (ret < 0)
  		return ret;
  
  	/* amount left to send in this message */
  	len = txmsg->cur_len - txmsg->cur_offset;
  
  	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
  	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
  
  	tosend = min(len, space);
  	if (len == txmsg->cur_len)
  		hdr.somt = 1;
  	if (space >= len)
  		hdr.eomt = 1;
  
  
  	hdr.msg_len = tosend + 1;
  	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
  	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
  	/* add crc at end */
  	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
  	idx += tosend + 1;
  
  	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
f0a8f533a   Jani Nikula   drm/print: add dr...
2849
  	if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
2f015ec6e   Lyude Paul   drm/dp_mst: Add s...
2850
2851
2852
2853
2854
  		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
  
  		drm_printf(&p, "sideband msg failed to send
  ");
  		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
  		return ret;
  	}
  
  	txmsg->cur_offset += tosend;
  	if (txmsg->cur_offset == txmsg->cur_len) {
  		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
  		return 1;
  	}
  	return 0;
  }
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2865
2866
2867
2868
/*
 * Transmit the message at the head of the down-request queue.
 *
 * Must be called with mgr->qlock held (asserted below). If the low-level
 * send fails, the message is unlinked from the queue, its state is set to
 * DRM_DP_SIDEBAND_TX_TIMEOUT and all sleepers on mgr->tx_waitq are woken
 * so they can observe the failure.
 */
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq))
		return;

	txmsg = list_first_entry(&mgr->tx_msg_downq,
				 struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret < 0) {
		/* send failed: drop the message and wake any waiters */
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up_all(&mgr->tx_waitq);
	}
}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
2885
2886
2887
2888
2889
/*
 * Append @txmsg to the down-request queue under mgr->qlock and, if it is
 * now the only queued entry, start transmitting it immediately. With
 * DRM_UT_DP debugging enabled the queued message is dumped first.
 */
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);

	if (drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}

	/* an already-busy queue is drained by the reply/timeout paths */
	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}
5950f0b79   Lyude Paul   drm/dp_mst: Move ...
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
  static void
  drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
  {
  	struct drm_dp_link_addr_reply_port *port_reply;
  	int i;
  
  	for (i = 0; i < reply->nports; i++) {
  		port_reply = &reply->ports[i];
  		DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d
  ",
  			      i,
  			      port_reply->input_port,
  			      port_reply->peer_device_type,
  			      port_reply->port_number,
  			      port_reply->dpcd_revision,
  			      port_reply->mcs,
  			      port_reply->ddps,
  			      port_reply->legacy_device_plug_status,
  			      port_reply->num_sdp_streams,
  			      port_reply->num_sdp_stream_sinks);
  	}
  }
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
2922
/*
 * Send a LINK_ADDRESS request to @mstb and fold the reply into the
 * in-memory topology: verify the branch GUID, add/update every port
 * reported in the reply, and prune ports that are no longer present
 * (e.g. removed while the topology was out of sync across suspend).
 *
 * Returns a negative errno on failure; otherwise 0 or 1, where 1 means
 * the topology changed and the caller should generate a hotplug event.
 */
static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_link_address_ack_reply *reply;
	struct drm_dp_mst_port *port, *tmp;
	int i, ret, port_mask = 0;
	bool changed = false;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	build_link_address(txmsg);

	/* set before queuing; cleared again at 'out' if the send failed */
	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	/* FIXME: Actually do some real error handling here */
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret <= 0) {
		DRM_ERROR("Sending link address failed with %d\n", ret);
		goto out;
	}
	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
		DRM_ERROR("link address NAK received\n");
		ret = -EIO;
		goto out;
	}

	reply = &txmsg->reply.u.link_addr;
	DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
	drm_dp_dump_link_address(reply);

	ret = drm_dp_check_mstb_guid(mstb, reply->guid);
	if (ret) {
		char buf[64];

		drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
		DRM_ERROR("GUID check on %s failed: %d\n",
			  buf, ret);
		goto out;
	}

	/* add or refresh each port the branch reported */
	for (i = 0; i < reply->nports; i++) {
		port_mask |= BIT(reply->ports[i].port_number);
		ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
							  &reply->ports[i]);
		if (ret == 1)
			changed = true;
		else if (ret < 0)
			goto out;
	}

	/* Prune any ports that are currently a part of mstb in our in-memory
	 * topology, but were not seen in this link address. Usually this
	 * means that they were removed while the topology was out of sync,
	 * e.g. during suspend/resume
	 */
	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		if (port_mask & BIT(port->port_num))
			continue;

		DRM_DEBUG_KMS("port %d was not in link address, removing\n",
			      port->port_num);
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
		changed = true;
	}
	mutex_unlock(&mgr->lock);

out:
	if (ret <= 0)
		mstb->link_address_sent = false;
	kfree(txmsg);
	return ret < 0 ? ret : changed;
}
a4292e521   Chris Wilson   drm: Match drm_dp...
3005
3006
3007
  static void
  drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
  				   struct drm_dp_mst_branch *mstb)
f79489074   Sean Paul   drm/dp_mst: Clear...
3008
3009
  {
  	struct drm_dp_sideband_msg_tx *txmsg;
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
3010
  	int ret;
f79489074   Sean Paul   drm/dp_mst: Clear...
3011
3012
3013
3014
3015
3016
  
  	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
  	if (!txmsg)
  		return;
  
  	txmsg->dst = mstb;
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
3017
  	build_clear_payload_id_table(txmsg);
f79489074   Sean Paul   drm/dp_mst: Clear...
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
  
  	drm_dp_queue_down_tx(mgr, txmsg);
  
  	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
  	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
  		DRM_DEBUG_KMS("clear payload table id nak received
  ");
  
  	kfree(txmsg);
  }
95b0013d2   Lyude Paul   drm/dp_mst: Refac...
3028
3029
3030
3031
/*
 * Send an ENUM_PATH_RESOURCES request for @port and cache the returned
 * bandwidth information (full PBN and FEC capability) on the port.
 *
 * Returns: a negative errno if sending failed, 1 if the cached values
 * changed (the caller should generate a hotplug event), 0 otherwise.
 */
static int
drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_branch *mstb,
				struct drm_dp_mst_port *port)
{
	struct drm_dp_enum_path_resources_ack_reply *path_res;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		ret = 0;
		path_res = &txmsg->reply.u.path_resources;

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("enum path resources nak received\n");
		} else {
			/* a mismatched port number is logged but not fatal */
			if (port->port_num != path_res->port_number)
				DRM_ERROR("got incorrect port in response\n");

			DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
				      path_res->port_number,
				      path_res->full_payload_bw_number,
				      path_res->avail_payload_bw_number);

			/*
			 * If something changed, make sure we send a
			 * hotplug
			 */
			if (port->full_pbn != path_res->full_payload_bw_number ||
			    port->fec_capable != path_res->fec_capable)
				ret = 1;

			port->full_pbn = path_res->full_payload_bw_number;
			port->fec_capable = path_res->fec_capable;
		}
	}

	kfree(txmsg);
	return ret;
}
91a25e463   Mykola Lysenko   drm/dp/mst: deall...
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
  static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
  {
  	if (!mstb->port_parent)
  		return NULL;
  
  	if (mstb->port_parent->mstb != mstb)
  		return mstb->port_parent;
  
  	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
  }
56d1c14ec   Lyude Paul   drm/dp_mst: Resta...
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
/*
 * Searches upwards in the topology starting from mstb to try to find the
 * closest available parent of mstb that's still connected to the rest of the
 * topology. This can be used in order to perform operations like releasing
 * payloads, where the branch device which owned the payload may no longer be
 * around and thus would require that the payload on the last living relative
 * be freed instead.
 *
 * The returned branch (if any) carries a topology reference taken via
 * drm_dp_mst_topology_try_get_mstb(); the caller must drop it with
 * drm_dp_mst_topology_put_mstb(). *port_num is set to the port number on
 * that branch only when a branch is returned.
 */
static struct drm_dp_mst_branch *
drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_branch *mstb,
					int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;

	mutex_lock(&mgr->lock);
	/* nothing to find if the whole topology is gone */
	if (!mgr->mst_primary)
		goto out;

	do {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
		if (!found_port)
			break;

		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
			rmstb = found_port->parent;
			*port_num = found_port->port_num;
		} else {
			/* Search again, starting from this parent */
			mstb = found_port->parent;
		}
	} while (!rmstb);
out:
	mutex_unlock(&mgr->lock);
	return rmstb;
}
8fa6a4255   Thierry Reding   drm/dp: Staticize...
3126
3127
3128
3129
/*
 * Send an ALLOCATE_PAYLOAD request for @port with VCPI @id and bandwidth
 * @pbn (the teardown path calls this with @pbn == 0). If the port's
 * direct parent has already left the topology, fall back to the closest
 * surviving ancestor so payloads can still be released.
 *
 * Returns 0 on success, -EINVAL on NAK or when no connected branch can
 * be found, -ENOMEM on allocation failure, or the wait error.
 */
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int ret, port_num;
	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
	int i;

	port_num = port->port_num;
	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb) {
		/* parent is gone - target the last connected ancestor */
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
							       port->parent,
							       &port_num);

		if (!mstb)
			return -EINVAL;
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	for (i = 0; i < port->num_sdp_streams; i++)
		sinks[i] = i;

	txmsg->dst = mstb;
	build_allocate_payload(txmsg, port_num,
			       id,
			       pbn, port->num_sdp_streams, sinks);

	drm_dp_queue_down_tx(mgr, txmsg);

	/*
	 * FIXME: there is a small chance that between getting the last
	 * connected mstb and sending the payload message, the last connected
	 * mstb could also be removed from the topology. In the future, this
	 * needs to be fixed by restarting the
	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
	 * timeout if the topology is still connected to the system.
	 */
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	/* drop the topology reference taken on mstb above */
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}
0bb9c2b27   Dhinakaran Pandiyan   drm/dp/mst: Sideb...
3181
3182
3183
3184
/**
 * drm_dp_send_power_updown_phy() - send a PHY power up/down sideband
 * request for a port
 * @mgr: topology manager
 * @port: port to change the PHY power state for
 * @power_up: selects power up (true) vs power down (false) in
 *	build_power_updown_phy()
 *
 * Returns: 0 on success, -EINVAL if the port is no longer part of the
 * topology or the request was NAKed, -ENOMEM on allocation failure, or
 * the error from waiting for the reply.
 */
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	/* takes a topology reference on @port, dropped before returning */
	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		drm_dp_mst_topology_put_port(port);
		return -ENOMEM;
	}

	txmsg->dst = port->parent;
	build_power_updown_phy(txmsg, port->port_num, power_up);
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
	drm_dp_mst_topology_put_port(port);

	return ret;
}
EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
e38c298fc   Sean Paul   drm/mst: Add supp...
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
  int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
  		struct drm_dp_mst_port *port,
  		struct drm_dp_query_stream_enc_status_ack_reply *status)
  {
  	struct drm_dp_sideband_msg_tx *txmsg;
  	u8 nonce[7];
  	int len, ret;
  
  	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
  	if (!txmsg)
  		return -ENOMEM;
  
  	port = drm_dp_mst_topology_get_port_validated(mgr, port);
  	if (!port) {
  		ret = -EINVAL;
  		goto out_get_port;
  	}
  
  	get_random_bytes(nonce, sizeof(nonce));
  
  	/*
  	 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
  	 *  transaction at the MST Branch device directly connected to the
  	 *  Source"
  	 */
  	txmsg->dst = mgr->mst_primary;
  
  	len = build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
  
  	drm_dp_queue_down_tx(mgr, txmsg);
  
  	ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
  	if (ret < 0) {
  		goto out;
  	} else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
  		drm_dbg_kms(mgr->dev, "query encryption status nak received
  ");
  		ret = -ENXIO;
  		goto out;
  	}
  
  	ret = 0;
  	memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
  
  out:
  	drm_dp_mst_topology_put_port(port);
  out_get_port:
  	kfree(txmsg);
  	return ret;
  }
  EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
  static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
  				       int id,
  				       struct drm_dp_payload *payload)
  {
  	int ret;
  
  	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
  	if (ret < 0) {
  		payload->payload_state = 0;
  		return ret;
  	}
  	payload->payload_state = DP_PAYLOAD_LOCAL;
  	return 0;
  }
8fa6a4255   Thierry Reding   drm/dp: Staticize...
3279
3280
3281
3282
  static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
  				       struct drm_dp_mst_port *port,
  				       int id,
  				       struct drm_dp_payload *payload)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3283
3284
  {
  	int ret;
948de8423   Suraj Upadhyay   drm : Insert blan...
3285

ad7f8a1f9   Dave Airlie   drm/helper: add D...
3286
3287
3288
3289
3290
3291
  	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
  	if (ret < 0)
  		return ret;
  	payload->payload_state = DP_PAYLOAD_REMOTE;
  	return ret;
  }
8fa6a4255   Thierry Reding   drm/dp: Staticize...
3292
3293
3294
3295
  static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
  					struct drm_dp_mst_port *port,
  					int id,
  					struct drm_dp_payload *payload)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3296
3297
3298
  {
  	DRM_DEBUG_KMS("
  ");
1e55a53a2   Matt Roper   drm: Trivial comm...
3299
  	/* it's okay for these to fail */
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3300
3301
3302
3303
3304
  	if (port) {
  		drm_dp_payload_send_msg(mgr, port, id, 0);
  	}
  
  	drm_dp_dpcd_write_payload(mgr, id, payload);
dfda0df34   Dave Airlie   drm/mst: rework p...
3305
  	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3306
3307
  	return 0;
  }
8fa6a4255   Thierry Reding   drm/dp: Staticize...
3308
3309
3310
/*
 * Payload teardown step 2: nothing further to send to the sink here;
 * just reset the bookkeeping state so the slot reads as free again.
 * @mgr and @id are unused but kept for symmetry with the other steps.
 */
static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
{
	payload->payload_state = 0;
	return 0;
}
  
/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * after calling this the driver should generate ACT and payload
 * packets.
 */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;
	int i, j;
	int cur_slots = 1;	/* slot 0 is reserved; payloads start at 1 */

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
		struct drm_dp_payload *payload = &mgr->payloads[i];
		bool put_port = false;

		/* solve the current payloads - compare to the hw ones
		   - update the hw view */
		req_payload.start_slot = cur_slots;
		if (vcpi) {
			port = container_of(vcpi, struct drm_dp_mst_port,
					    vcpi);

			/* Validated ports don't matter if we're releasing
			 * VCPI
			 */
			if (vcpi->num_slots) {
				port = drm_dp_mst_topology_get_port_validated(
				    mgr, port);
				if (!port) {
					mutex_unlock(&mgr->payload_lock);
					return -EINVAL;
				}
				put_port = true;
			}

			req_payload.num_slots = vcpi->num_slots;
			req_payload.vcpi = vcpi->vcpi;
		} else {
			port = NULL;
			req_payload.num_slots = 0;
		}

		payload->start_slot = req_payload.start_slot;
		/* work out what is required to happen with this payload */
		if (payload->num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, vcpi->vcpi,
							    &req_payload);
				payload->num_slots = req_payload.num_slots;
				payload->vcpi = req_payload.vcpi;

			} else if (payload->num_slots) {
				/* slots -> 0: tear the payload down */
				payload->num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port,
							     payload->vcpi,
							     payload);
				req_payload.payload_state =
					payload->payload_state;
				payload->start_slot = 0;
			}
			payload->payload_state = req_payload.payload_state;
		}
		cur_slots += req_payload.num_slots;

		if (put_port)
			drm_dp_mst_topology_put_port(port);
	}

	/* Compact the table: drop entries marked DP_PAYLOAD_DELETE_LOCAL
	 * by shifting later entries (and their payload_mask bits) down.
	 */
	for (i = 0; i < mgr->max_payloads; /* do nothing */) {
		if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
			i++;
			continue;
		}

		DRM_DEBUG_KMS("removing payload %d\n", i);
		for (j = i; j < mgr->max_payloads - 1; j++) {
			mgr->payloads[j] = mgr->payloads[j + 1];
			mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];

			if (mgr->proposed_vcpis[j] &&
			    mgr->proposed_vcpis[j]->num_slots) {
				set_bit(j + 1, &mgr->payload_mask);
			} else {
				clear_bit(j + 1, &mgr->payload_mask);
			}
		}

		memset(&mgr->payloads[mgr->max_payloads - 1], 0,
		       sizeof(struct drm_dp_payload));
		mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
		clear_bit(mgr->max_payloads, &mgr->payload_mask);
	}
	mutex_unlock(&mgr->payload_lock);

	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part1);
  
/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_port *port;
	int i;
	int ret = 0;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {

		if (!mgr->proposed_vcpis[i])
			continue;

		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		}
		/* bail out on the first failure, payload lock released */
		if (ret) {
			mutex_unlock(&mgr->payload_lock);
			return ret;
		}
	}
	mutex_unlock(&mgr->payload_lock);
	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3467
3468
/*
 * Read @size bytes at DPCD address @offset of the device behind @port via
 * a REMOTE_DPCD_READ sideband message.
 *
 * Returns the number of bytes copied into @bytes on success, or a
 * negative errno: -EINVAL if the parent branch is gone, -ENOMEM on
 * allocation failure, -EIO on NAK, -EPROTO on a short reply.
 */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes)
{
	int ret = 0;
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	build_dpcd_read(txmsg, port->port_num, offset, size);
	txmsg->dst = port->parent;

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret < 0)
		goto fail_free;

	/* DPCD read should never be NACKed */
	if (txmsg->reply.reply_type == 1) {	/* 1 == DP_SIDEBAND_REPLY_NAK */
		DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
			  mstb, port->port_num, offset, size);
		ret = -EIO;
		goto fail_free;
	}

	/* replies shorter than requested are treated as protocol errors */
	if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
		ret = -EPROTO;
		goto fail_free;
	}

	ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
		    size);
	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);

fail_free:
	kfree(txmsg);
fail_put:
	drm_dp_mst_topology_put_mstb(mstb);

	return ret;
}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3518
3519
3520
3521
3522
  
/*
 * Write @size bytes from @bytes to DPCD address @offset of the device
 * behind @port via a REMOTE_DPCD_WRITE sideband message.
 *
 * Returns @size on success, -EINVAL if the parent branch is gone,
 * -ENOMEM on allocation failure, -EIO on NAK, or the wait error.
 */
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
{
	int ret;
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
	txmsg->dst = mstb;

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EIO;
		else
			ret = size;
	}

	kfree(txmsg);
fail_put:
	/* drop the topology reference taken on mstb above */
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}
  
  static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
  {
  	struct drm_dp_sideband_msg_reply_body reply;
45bbda1e3   Ville Syrjälä   drm/dp/mst: Provi...
3557
  	reply.reply_type = DP_SIDEBAND_REPLY_ACK;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3558
3559
3560
3561
3562
3563
3564
  	reply.req_type = req_type;
  	drm_dp_encode_sideband_reply(&reply, msg);
  	return 0;
  }
  
  static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
  				    struct drm_dp_mst_branch *mstb,
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
3565
  				    int req_type, bool broadcast)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3566
3567
3568
3569
3570
3571
3572
3573
  {
  	struct drm_dp_sideband_msg_tx *txmsg;
  
  	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
  	if (!txmsg)
  		return -ENOMEM;
  
  	txmsg->dst = mstb;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3574
3575
3576
  	drm_dp_encode_up_ack_reply(txmsg, req_type);
  
  	mutex_lock(&mgr->qlock);
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
3577
3578
  	/* construct a chunk from the first msg in the tx_msg queue */
  	process_single_tx_qlock(mgr, txmsg, true);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3579
  	mutex_unlock(&mgr->qlock);
1f16ee7fa   Mykola Lysenko   drm/dp/mst: alway...
3580
3581
  
  	kfree(txmsg);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3582
3583
  	return 0;
  }
0c3bb15cf   Ville Syrjälä   drm/dp/mst: Handl...
3584
  static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8  dp_link_count)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3585
  {
0c3bb15cf   Ville Syrjälä   drm/dp/mst: Handl...
3586
  	if (dp_link_bw == 0 || dp_link_count == 0)
b853fdb3c   Chris Wilson   drm/dp/mst: Handl...
3587
3588
3589
  		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)
  ",
  			      dp_link_bw, dp_link_count);
b853fdb3c   Chris Wilson   drm/dp/mst: Handl...
3590

0c3bb15cf   Ville Syrjälä   drm/dp/mst: Handl...
3591
  	return dp_link_bw * dp_link_count / 2;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3592
3593
3594
  }
  
  /**
4b4659128   Lyude Paul   drm/i915/dp: Extr...
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
   * drm_dp_read_mst_cap() - check whether or not a sink supports MST
   * @aux: The DP AUX channel to use
   * @dpcd: A cached copy of the DPCD capabilities for this sink
   *
   * Returns: %True if the sink supports MST, %false otherwise
   */
  bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
  			 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
  {
  	u8 mstm_cap;
  
  	if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
  		return false;
  
  	if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
  		return false;
  
  	return mstm_cap & DP_MST_CAP;
  }
  EXPORT_SYMBOL(drm_dp_read_mst_cap);
  
  /**
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
   * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
   * @mgr: manager to set state for
   * @mst_state: true to enable MST on this connector - false to disable.
   *
   * This is called by the driver when it detects an MST capable device plugged
   * into a DP MST capable port, or when a DP MST capable device is unplugged.
   */
  int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
  {
  	int ret = 0;
  	struct drm_dp_mst_branch *mstb = NULL;
8732fe46b   Lyude Paul   drm/dp_mst: Fix c...
3628
  	mutex_lock(&mgr->payload_lock);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3629
3630
3631
3632
3633
3634
3635
  	mutex_lock(&mgr->lock);
  	if (mst_state == mgr->mst_state)
  		goto out_unlock;
  
  	mgr->mst_state = mst_state;
  	/* set the device into MST mode */
  	if (mst_state) {
7a3cbf590   José Roberto de Souza   drm/mst: Some sty...
3636
  		struct drm_dp_payload reset_pay;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3637
3638
3639
3640
3641
3642
3643
3644
3645
  		WARN_ON(mgr->mst_primary);
  
  		/* get dpcd info */
  		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
  		if (ret != DP_RECEIVER_CAP_SIZE) {
  			DRM_DEBUG_KMS("failed to read DPCD
  ");
  			goto out_unlock;
  		}
0c3bb15cf   Ville Syrjälä   drm/dp/mst: Handl...
3646
3647
3648
  		mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
  							mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
  		if (mgr->pbn_div == 0) {
b853fdb3c   Chris Wilson   drm/dp/mst: Handl...
3649
3650
3651
  			ret = -EINVAL;
  			goto out_unlock;
  		}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
  		/* add initial branch device at LCT 1 */
  		mstb = drm_dp_add_mst_branch_device(1, NULL);
  		if (mstb == NULL) {
  			ret = -ENOMEM;
  			goto out_unlock;
  		}
  		mstb->mgr = mgr;
  
  		/* give this the main reference */
  		mgr->mst_primary = mstb;
ebcc0e6b5   Lyude Paul   drm/dp_mst: Intro...
3662
  		drm_dp_mst_topology_get_mstb(mgr->mst_primary);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3663

c175cd16d   Andrey Grodzovsky   drm/dp/mst: Rever...
3664
  		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3ff4c24bd   Lyude Paul   drm/dp_mst: Fix i...
3665
3666
3667
  					 DP_MST_EN |
  					 DP_UP_REQ_EN |
  					 DP_UPSTREAM_IS_SRC);
7a3cbf590   José Roberto de Souza   drm/mst: Some sty...
3668
  		if (ret < 0)
c175cd16d   Andrey Grodzovsky   drm/dp/mst: Rever...
3669
  			goto out_unlock;
c175cd16d   Andrey Grodzovsky   drm/dp/mst: Rever...
3670

7a3cbf590   José Roberto de Souza   drm/mst: Some sty...
3671
3672
3673
  		reset_pay.start_slot = 0;
  		reset_pay.num_slots = 0x3f;
  		drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3674

ad7f8a1f9   Dave Airlie   drm/helper: add D...
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
  		queue_work(system_long_wq, &mgr->work);
  
  		ret = 0;
  	} else {
  		/* disable MST on the device */
  		mstb = mgr->mst_primary;
  		mgr->mst_primary = NULL;
  		/* this can fail if the device is gone */
  		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
  		ret = 0;
8732fe46b   Lyude Paul   drm/dp_mst: Fix c...
3685
3686
3687
3688
  		memset(mgr->payloads, 0,
  		       mgr->max_payloads * sizeof(mgr->payloads[0]));
  		memset(mgr->proposed_vcpis, 0,
  		       mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3689
3690
  		mgr->payload_mask = 0;
  		set_bit(0, &mgr->payload_mask);
dfda0df34   Dave Airlie   drm/mst: rework p...
3691
  		mgr->vcpi_mask = 0;
f79489074   Sean Paul   drm/dp_mst: Clear...
3692
  		mgr->payload_id_table_cleared = false;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3693
3694
3695
3696
  	}
  
  out_unlock:
  	mutex_unlock(&mgr->lock);
8732fe46b   Lyude Paul   drm/dp_mst: Fix c...
3697
  	mutex_unlock(&mgr->payload_lock);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3698
  	if (mstb)
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
3699
  		drm_dp_mst_topology_put_mstb(mstb);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3700
3701
3702
3703
  	return ret;
  
  }
  EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
3704
3705
3706
3707
3708
3709
3710
  static void
  drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
  {
  	struct drm_dp_mst_port *port;
  
  	/* The link address will need to be re-sent on resume */
  	mstb->link_address_sent = false;
87212b51b   Lyude Paul   drm/dp_mst: Repro...
3711
  	list_for_each_entry(port, &mstb->ports, next)
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
3712
3713
  		if (port->mstb)
  			drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
3714
  }
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
  /**
   * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
   * @mgr: manager to suspend
   *
   * This function tells the MST device that we can't handle UP messages
   * anymore. This should stop it from sending any since we are suspended.
   */
  void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
  {
  	mutex_lock(&mgr->lock);
  	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
  			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
  	mutex_unlock(&mgr->lock);
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
3728
  	flush_work(&mgr->up_req_work);
274d83524   Dave Airlie   drm/dp/mst: drop ...
3729
  	flush_work(&mgr->work);
7cb12d483   Lyude Paul   drm/dp_mst: Destr...
3730
  	flush_work(&mgr->delayed_destroy_work);
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
3731
3732
3733
3734
3735
  
  	mutex_lock(&mgr->lock);
  	if (mgr->mst_state && mgr->mst_primary)
  		drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
  	mutex_unlock(&mgr->lock);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3736
3737
3738
3739
3740
3741
  }
  EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
  
  /**
   * drm_dp_mst_topology_mgr_resume() - resume the MST manager
   * @mgr: manager to resume
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
3742
   * @sync: whether or not to perform topology reprobing synchronously
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3743
3744
3745
3746
   *
   * This will fetch DPCD and see if the device is still there,
   * if it is, it will rewrite the MSTM control bits, and return.
   *
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
3747
   * If the device fails this returns -1, and the driver should do
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3748
   * a full MST reprobe, in case we were undocked.
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
3749
3750
3751
3752
3753
3754
3755
3756
3757
   *
   * During system resume (where it is assumed that the driver will be calling
   * drm_atomic_helper_resume()) this function should be called beforehand with
   * @sync set to true. In contexts like runtime resume where the driver is not
   * expected to be calling drm_atomic_helper_resume(), this function should be
   * called with @sync set to false in order to avoid deadlocking.
   *
   * Returns: -1 if the MST topology was removed while we were suspended, 0
   * otherwise.
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3758
   */
6f85f7382   Lyude Paul   drm/dp_mst: Add b...
3759
3760
  int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
  				   bool sync)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3761
  {
79413ed4a   Lyude Paul   drm/dp_mst: Lesse...
3762
3763
  	int ret;
  	u8 guid[16];
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3764
3765
  
  	mutex_lock(&mgr->lock);
79413ed4a   Lyude Paul   drm/dp_mst: Lesse...
3766
3767
  	if (!mgr->mst_primary)
  		goto out_fail;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3768

79413ed4a   Lyude Paul   drm/dp_mst: Lesse...
3769
3770
3771
3772
3773
3774
3775
  	ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
  			       DP_RECEIVER_CAP_SIZE);
  	if (ret != DP_RECEIVER_CAP_SIZE) {
  		DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?
  ");
  		goto out_fail;
  	}
1652fce65   Lyude   drm/dp/mst: Resto...
3776

79413ed4a   Lyude Paul   drm/dp_mst: Lesse...
3777
3778
3779
3780
3781
3782
3783
3784
3785
  	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
  				 DP_MST_EN |
  				 DP_UP_REQ_EN |
  				 DP_UPSTREAM_IS_SRC);
  	if (ret < 0) {
  		DRM_DEBUG_KMS("mst write failed - undocked during suspend?
  ");
  		goto out_fail;
  	}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3786

79413ed4a   Lyude Paul   drm/dp_mst: Lesse...
3787
3788
3789
3790
3791
3792
3793
  	/* Some hubs forget their guids after they resume */
  	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
  	if (ret != 16) {
  		DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?
  ");
  		goto out_fail;
  	}
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
3794
3795
3796
3797
3798
3799
3800
  
  	ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
  	if (ret) {
  		DRM_DEBUG_KMS("check mstb failed - undocked during suspend?
  ");
  		goto out_fail;
  	}
1652fce65   Lyude   drm/dp/mst: Resto...
3801

6f85f7382   Lyude Paul   drm/dp_mst: Add b...
3802
3803
3804
3805
3806
3807
  	/*
  	 * For the final step of resuming the topology, we need to bring the
  	 * state of our in-memory topology back into sync with reality. So,
  	 * restart the probing process as if we're probing a new hub
  	 */
  	queue_work(system_long_wq, &mgr->work);
79413ed4a   Lyude Paul   drm/dp_mst: Lesse...
3808
  	mutex_unlock(&mgr->lock);
1652fce65   Lyude   drm/dp/mst: Resto...
3809

6f85f7382   Lyude Paul   drm/dp_mst: Add b...
3810
3811
3812
3813
3814
  	if (sync) {
  		DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...
  ");
  		flush_work(&mgr->work);
  	}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3815

79413ed4a   Lyude Paul   drm/dp_mst: Lesse...
3816
  	return 0;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3817

79413ed4a   Lyude Paul   drm/dp_mst: Lesse...
3818
  out_fail:
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3819
  	mutex_unlock(&mgr->lock);
79413ed4a   Lyude Paul   drm/dp_mst: Lesse...
3820
  	return -1;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3821
3822
  }
  EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
3823
3824
3825
  static bool
  drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
  		      struct drm_dp_mst_branch **mstb)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3826
3827
3828
  {
  	int len;
  	u8 replyblock[32];
b37ea8bff   Bo YU   drm/drm_dp_mst:re...
3829
  	int replylen, curreply;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3830
  	int ret;
21a729d00   Sean Paul   drm/mst: Separate...
3831
3832
  	u8 hdrlen;
  	struct drm_dp_sideband_msg_hdr hdr;
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
3833
3834
  	struct drm_dp_sideband_msg_rx *msg =
  		up ? &mgr->up_req_recv : &mgr->down_rep_recv;
21a729d00   Sean Paul   drm/mst: Separate...
3835
3836
  	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
  			   DP_SIDEBAND_MSG_DOWN_REP_BASE;
cbfb1b744   Lyude Paul   drm/dp_mst: Fix N...
3837
3838
  	if (!up)
  		*mstb = NULL;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3839
3840
  
  	len = min(mgr->max_dpcd_transaction_bytes, 16);
21a729d00   Sean Paul   drm/mst: Separate...
3841
  	ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3842
3843
3844
  	if (ret != len) {
  		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d
  ", len, ret);
636c4c3e7   Imre Deak   drm/mst: Avoid pr...
3845
  		return false;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3846
  	}
21a729d00   Sean Paul   drm/mst: Separate...
3847
3848
3849
3850
3851
3852
3853
3854
3855
  
  	ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
  	if (ret == false) {
  		print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
  			       1, replyblock, len, false);
  		DRM_DEBUG_KMS("ERROR: failed header
  ");
  		return false;
  	}
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
3856
  	if (!up) {
fbc821c4a   Sean Paul   drm/mst: Support ...
3857
3858
3859
3860
3861
3862
3863
3864
  		/* Caller is responsible for giving back this reference */
  		*mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
  		if (!*mstb) {
  			DRM_DEBUG_KMS("Got MST reply from unknown device %d
  ",
  				      hdr.lct);
  			return false;
  		}
fbc821c4a   Sean Paul   drm/mst: Support ...
3865
  	}
21a729d00   Sean Paul   drm/mst: Separate...
3866
3867
3868
3869
3870
3871
3872
3873
3874
  	if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
  		DRM_DEBUG_KMS("sideband msg set header failed %d
  ",
  			      replyblock[0]);
  		return false;
  	}
  
  	replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
  	ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3875
3876
3877
  	if (!ret) {
  		DRM_DEBUG_KMS("sideband msg build failed %d
  ", replyblock[0]);
636c4c3e7   Imre Deak   drm/mst: Avoid pr...
3878
  		return false;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3879
  	}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3880

21a729d00   Sean Paul   drm/mst: Separate...
3881
  	replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3882
3883
3884
3885
3886
3887
  	curreply = len;
  	while (replylen > 0) {
  		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
  		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
  				    replyblock, len);
  		if (ret != len) {
448421b5e   Imre Deak   drm/mst: Fix erro...
3888
3889
3890
  			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)
  ",
  				      len, ret);
636c4c3e7   Imre Deak   drm/mst: Avoid pr...
3891
  			return false;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3892
  		}
448421b5e   Imre Deak   drm/mst: Fix erro...
3893

21a729d00   Sean Paul   drm/mst: Separate...
3894
  		ret = drm_dp_sideband_append_payload(msg, replyblock, len);
448421b5e   Imre Deak   drm/mst: Fix erro...
3895
  		if (!ret) {
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3896
3897
  			DRM_DEBUG_KMS("failed to build sideband msg
  ");
636c4c3e7   Imre Deak   drm/mst: Avoid pr...
3898
  			return false;
448421b5e   Imre Deak   drm/mst: Fix erro...
3899
  		}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3900
3901
3902
  		curreply += len;
  		replylen -= len;
  	}
636c4c3e7   Imre Deak   drm/mst: Avoid pr...
3903
  	return true;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3904
3905
3906
3907
  }
  
  static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
  {
8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3908
  	struct drm_dp_sideband_msg_tx *txmsg;
fbc821c4a   Sean Paul   drm/mst: Support ...
3909
  	struct drm_dp_mst_branch *mstb = NULL;
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
3910
  	struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3911

d308a881a   Lyude Paul   drm/dp_mst: Kill ...
3912
3913
  	if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
  		goto out;
8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3914

fbc821c4a   Sean Paul   drm/mst: Support ...
3915
3916
3917
  	/* Multi-packet message transmission, don't clear the reply */
  	if (!msg->have_eomt)
  		goto out;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3918

8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3919
  	/* find the message */
8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3920
  	mutex_lock(&mgr->qlock);
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
3921
3922
  	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
  					 struct drm_dp_sideband_msg_tx, next);
8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3923
  	mutex_unlock(&mgr->qlock);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3924

d308a881a   Lyude Paul   drm/dp_mst: Kill ...
3925
3926
  	/* Were we actually expecting a response, and from this mstb? */
  	if (!txmsg || txmsg->dst != mstb) {
fbc821c4a   Sean Paul   drm/mst: Support ...
3927
  		struct drm_dp_sideband_msg_hdr *hdr;
948de8423   Suraj Upadhyay   drm : Insert blan...
3928

fbc821c4a   Sean Paul   drm/mst: Support ...
3929
  		hdr = &msg->initial_hdr;
8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3930
3931
3932
  		DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x
  ",
  			      mstb, hdr->seqno, hdr->lct, hdr->rad[0],
fbc821c4a   Sean Paul   drm/mst: Support ...
3933
3934
  			      msg->msg[0]);
  		goto out_clear_reply;
8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3935
  	}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3936

fbc821c4a   Sean Paul   drm/mst: Support ...
3937
  	drm_dp_sideband_parse_reply(msg, &txmsg->reply);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3938

fbc821c4a   Sean Paul   drm/mst: Support ...
3939
  	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3940
3941
3942
3943
3944
3945
3946
  		DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x
  ",
  			      txmsg->reply.req_type,
  			      drm_dp_mst_req_type_str(txmsg->reply.req_type),
  			      txmsg->reply.u.nak.reason,
  			      drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
  			      txmsg->reply.u.nak.nak_data);
fbc821c4a   Sean Paul   drm/mst: Support ...
3947
  	}
45bbda1e3   Ville Syrjälä   drm/dp/mst: Provi...
3948

fbc821c4a   Sean Paul   drm/mst: Support ...
3949
  	memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3950
  	drm_dp_mst_topology_put_mstb(mstb);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3951

8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3952
3953
  	mutex_lock(&mgr->qlock);
  	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
3954
  	list_del(&txmsg->next);
8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3955
  	mutex_unlock(&mgr->qlock);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3956

8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3957
3958
3959
  	wake_up_all(&mgr->tx_waitq);
  
  	return 0;
fbc821c4a   Sean Paul   drm/mst: Support ...
3960
  out_clear_reply:
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
3961
  	memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
fbc821c4a   Sean Paul   drm/mst: Support ...
3962
3963
3964
  out:
  	if (mstb)
  		drm_dp_mst_topology_put_mstb(mstb);
8b1e589d1   Lyude Paul   drm/dp_mst: Refac...
3965
3966
  
  	return 0;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
3967
  }
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
3968
  static inline bool
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
3969
3970
3971
3972
3973
3974
  drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
  			  struct drm_dp_pending_up_req *up_req)
  {
  	struct drm_dp_mst_branch *mstb = NULL;
  	struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
  	struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
3975
  	bool hotplug = false;
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
3976
3977
3978
3979
3980
3981
3982
3983
  
  	if (hdr->broadcast) {
  		const u8 *guid = NULL;
  
  		if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
  			guid = msg->u.conn_stat.guid;
  		else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
  			guid = msg->u.resource_stat.guid;
8ccb5bf76   José Roberto de Souza   drm/mst: Fix poss...
3984
3985
  		if (guid)
  			mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
3986
3987
3988
3989
3990
3991
3992
3993
  	} else {
  		mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
  	}
  
  	if (!mstb) {
  		DRM_DEBUG_KMS("Got MST reply from unknown device %d
  ",
  			      hdr->lct);
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
3994
  		return false;
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
3995
3996
3997
3998
3999
  	}
  
  	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
  	if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
  		drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4000
  		hotplug = true;
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4001
4002
4003
  	}
  
  	drm_dp_mst_topology_put_mstb(mstb);
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4004
  	return hotplug;
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4005
4006
4007
4008
4009
4010
4011
4012
  }
  
  static void drm_dp_mst_up_req_work(struct work_struct *work)
  {
  	struct drm_dp_mst_topology_mgr *mgr =
  		container_of(work, struct drm_dp_mst_topology_mgr,
  			     up_req_work);
  	struct drm_dp_pending_up_req *up_req;
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4013
  	bool send_hotplug = false;
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4014

14692a363   Lyude Paul   drm/dp_mst: Add p...
4015
  	mutex_lock(&mgr->probe_lock);
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
  	while (true) {
  		mutex_lock(&mgr->up_req_lock);
  		up_req = list_first_entry_or_null(&mgr->up_req_list,
  						  struct drm_dp_pending_up_req,
  						  next);
  		if (up_req)
  			list_del(&up_req->next);
  		mutex_unlock(&mgr->up_req_lock);
  
  		if (!up_req)
  			break;
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4027
  		send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4028
4029
  		kfree(up_req);
  	}
14692a363   Lyude Paul   drm/dp_mst: Add p...
4030
  	mutex_unlock(&mgr->probe_lock);
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4031
4032
4033
  
  	if (send_hotplug)
  		drm_kms_helper_hotplug_event(mgr->dev);
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4034
  }
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4035
4036
  static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
  {
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4037
  	struct drm_dp_pending_up_req *up_req;
636c4c3e7   Imre Deak   drm/mst: Avoid pr...
4038

d308a881a   Lyude Paul   drm/dp_mst: Kill ...
4039
  	if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
a29d88187   Lyude Paul   drm/dp_mst: Refac...
4040
  		goto out;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4041

60f9ae9d0   Lyude Paul   drm/dp_mst: Remov...
4042
4043
  	if (!mgr->up_req_recv.have_eomt)
  		return 0;
bd9343208   Mykola Lysenko   drm/dp/mst: proce...
4044

9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4045
4046
4047
4048
4049
  	up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
  	if (!up_req) {
  		DRM_ERROR("Not enough memory to process MST up req
  ");
  		return -ENOMEM;
60f9ae9d0   Lyude Paul   drm/dp_mst: Remov...
4050
  	}
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4051
  	INIT_LIST_HEAD(&up_req->next);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4052

9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4053
  	drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
bd9343208   Mykola Lysenko   drm/dp/mst: proce...
4054

9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4055
4056
4057
4058
4059
4060
  	if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
  	    up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
  		DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x
  ",
  			      up_req->msg.req_type);
  		kfree(up_req);
a29d88187   Lyude Paul   drm/dp_mst: Refac...
4061
  		goto out;
a29d88187   Lyude Paul   drm/dp_mst: Refac...
4062
  	}
5e93b8208   Hersen Wu   drm/dp/mst: move ...
4063

9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4064
  	drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
4065
  				 false);
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4066
4067
4068
4069
  
  	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
  		const struct drm_dp_connection_status_notify *conn_stat =
  			&up_req->msg.u.conn_stat;
8ae22cb41   Dave Airlie   Revert "drm/dp/ms...
4070

a29d88187   Lyude Paul   drm/dp_mst: Refac...
4071
4072
  		DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d
  ",
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4073
4074
4075
4076
4077
4078
4079
4080
4081
  			      conn_stat->port_number,
  			      conn_stat->legacy_device_plug_status,
  			      conn_stat->displayport_device_plug_status,
  			      conn_stat->message_capability_status,
  			      conn_stat->input_port,
  			      conn_stat->peer_device_type);
  	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
  		const struct drm_dp_resource_status_notify *res_stat =
  			&up_req->msg.u.resource_stat;
bd9343208   Mykola Lysenko   drm/dp/mst: proce...
4082

a29d88187   Lyude Paul   drm/dp_mst: Refac...
4083
4084
  		DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d
  ",
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4085
4086
  			      res_stat->port_number,
  			      res_stat->available_pbn);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4087
  	}
60f9ae9d0   Lyude Paul   drm/dp_mst: Remov...
4088

fbc821c4a   Sean Paul   drm/mst: Support ...
4089
  	up_req->hdr = mgr->up_req_recv.initial_hdr;
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
4090
4091
4092
4093
  	mutex_lock(&mgr->up_req_lock);
  	list_add_tail(&up_req->next, &mgr->up_req_list);
  	mutex_unlock(&mgr->up_req_lock);
  	queue_work(system_long_wq, &mgr->up_req_work);
a29d88187   Lyude Paul   drm/dp_mst: Refac...
4094
  out:
60f9ae9d0   Lyude Paul   drm/dp_mst: Remov...
4095
  	memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
60f9ae9d0   Lyude Paul   drm/dp_mst: Remov...
4096
  	return 0;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4097
4098
4099
4100
4101
4102
  }
  
  /**
   * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
   * @mgr: manager to notify irq for.
   * @esi: 4 bytes from SINK_COUNT_ESI
295ee8531   Daniel Vetter   drm: Docbook fixes
4103
   * @handled: whether the hpd interrupt was consumed or not
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
   *
   * This should be called from the driver when it detects a short IRQ,
   * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
   * topology manager will process the sideband messages received as a result
   * of this.
   */
  int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
  {
  	int ret = 0;
  	int sc;
  	*handled = false;
  	sc = esi[0] & 0x3f;
  
  	if (sc != mgr->sink_count) {
  		mgr->sink_count = sc;
  		*handled = true;
  	}
  
  	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
  		ret = drm_dp_mst_handle_down_rep(mgr);
  		*handled = true;
  	}
  
  	if (esi[1] & DP_UP_REQ_MSG_RDY) {
  		ret |= drm_dp_mst_handle_up_req(mgr);
  		*handled = true;
  	}
  
  	drm_dp_mst_kick_tx(mgr);
  	return ret;
  }
  EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
  
  /**
   * drm_dp_mst_detect_port() - get connection status for an MST port
132d49d72   Daniel Vetter   drm/dp-mst: Missi...
4139
   * @connector: DRM connector for this port
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4140
   * @ctx: The acquisition context to use for grabbing locks
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4141
   * @mgr: manager for this port
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4142
   * @port: pointer to a port
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4143
   *
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4144
   * This returns the current connection state for a port.
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4145
   */
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4146
4147
4148
4149
4150
  int
  drm_dp_mst_detect_port(struct drm_connector *connector,
  		       struct drm_modeset_acquire_ctx *ctx,
  		       struct drm_dp_mst_topology_mgr *mgr,
  		       struct drm_dp_mst_port *port)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4151
  {
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4152
  	int ret;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4153

1e55a53a2   Matt Roper   drm: Trivial comm...
4154
  	/* we need to search for the port in the mgr in case it's gone */
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
4155
  	port = drm_dp_mst_topology_get_port_validated(mgr, port);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4156
4157
  	if (!port)
  		return connector_status_disconnected;
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4158
4159
4160
4161
4162
  	ret = drm_modeset_lock(&mgr->base.lock, ctx);
  	if (ret)
  		goto out;
  
  	ret = connector_status_disconnected;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4163
4164
4165
4166
4167
4168
  	if (!port->ddps)
  		goto out;
  
  	switch (port->pdt) {
  	case DP_PEER_DEVICE_NONE:
  	case DP_PEER_DEVICE_MST_BRANCHING:
db1a07956   Wayne Lin   drm/dp_mst: Handl...
4169
4170
  		if (!port->mcs)
  			ret = connector_status_connected;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4171
4172
4173
  		break;
  
  	case DP_PEER_DEVICE_SST_SINK:
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4174
  		ret = connector_status_connected;
8ae22cb41   Dave Airlie   Revert "drm/dp/ms...
4175
4176
4177
4178
  		/* for logical ports - cache the EDID */
  		if (port->port_num >= 8 && !port->cached_edid) {
  			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
  		}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4179
4180
4181
  		break;
  	case DP_PEER_DEVICE_DP_LEGACY_CONV:
  		if (port->ldps)
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4182
  			ret = connector_status_connected;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4183
4184
4185
  		break;
  	}
  out:
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
4186
  	drm_dp_mst_topology_put_port(port);
3f9b3f02d   Lyude Paul   drm/dp_mst: Prote...
4187
  	return ret;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
  }
  EXPORT_SYMBOL(drm_dp_mst_detect_port);
  
  /**
   * drm_dp_mst_get_edid() - get EDID for an MST port
   * @connector: toplevel connector to get EDID for
   * @mgr: manager for this port
   * @port: unverified pointer to a port.
   *
   * This returns an EDID for the port connected to a connector,
   * It validates the pointer still exists so the caller doesn't require a
   * reference.
   */
  struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
  {
  	struct edid *edid = NULL;
1e55a53a2   Matt Roper   drm: Trivial comm...
4204
  	/* we need to search for the port in the mgr in case it's gone */
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
4205
  	port = drm_dp_mst_topology_get_port_validated(mgr, port);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4206
4207
  	if (!port)
  		return NULL;
c6a0aed4d   Dave Airlie   drm/mst: cached E...
4208
4209
  	if (port->cached_edid)
  		edid = drm_edid_duplicate(port->cached_edid);
8ae22cb41   Dave Airlie   Revert "drm/dp/ms...
4210
4211
  	else {
  		edid = drm_get_edid(connector, &port->aux.ddc);
8ae22cb41   Dave Airlie   Revert "drm/dp/ms...
4212
  	}
ef8f9bea1   Libin Yang   dp/mst: add SDP s...
4213
  	port->has_audio = drm_detect_monitor_audio(edid);
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
4214
  	drm_dp_mst_topology_put_port(port);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4215
4216
4217
4218
4219
  	return edid;
  }
  EXPORT_SYMBOL(drm_dp_mst_get_edid);
  
  /**
e4b0c8681   Lyude Paul   drm/dp_mst: Depre...
4220
   * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4221
4222
   * @mgr: manager to use
   * @pbn: payload bandwidth to convert into slots.
e4b0c8681   Lyude Paul   drm/dp_mst: Depre...
4223
4224
4225
4226
4227
4228
4229
   *
   * Calculate the number of VCPI slots that will be required for the given PBN
   * value. This function is deprecated, and should not be used in atomic
   * drivers.
   *
   * RETURNS:
   * The total slots required for this port, or error.
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4230
4231
4232
4233
4234
4235
4236
   */
  int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
  			   int pbn)
  {
  	int num_slots;
  
  	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
feb2c3bc3   Pandiyan, Dhinakaran   drm/dp: Kill unus...
4237
4238
  	/* max. time slots - one slot for MTP header */
  	if (num_slots > 63)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4239
4240
4241
4242
4243
4244
  		return -ENOSPC;
  	return num_slots;
  }
  EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
  
  static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
1e797f556   Pandiyan, Dhinakaran   drm/dp: Split drm...
4245
  			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4246
  {
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4247
  	int ret;
feb2c3bc3   Pandiyan, Dhinakaran   drm/dp: Kill unus...
4248
  	/* max. time slots - one slot for MTP header */
1e797f556   Pandiyan, Dhinakaran   drm/dp: Split drm...
4249
  	if (slots > 63)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4250
4251
4252
  		return -ENOSPC;
  
  	vcpi->pbn = pbn;
1e797f556   Pandiyan, Dhinakaran   drm/dp: Split drm...
4253
4254
  	vcpi->aligned_pbn = slots * mgr->pbn_div;
  	vcpi->num_slots = slots;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4255
4256
4257
4258
4259
4260
4261
4262
  
  	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
  	if (ret < 0)
  		return ret;
  	return 0;
  }
  
  /**
eceae1472   Lyude Paul   drm/dp_mst: Start...
4263
   * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4264
4265
4266
4267
   * @state: global atomic state
   * @mgr: MST topology manager for the port
   * @port: port to find vcpi slots for
   * @pbn: bandwidth required for the mode in PBN
1c6c1cb5a   Mikita Lipski   drm/dp_mst: Manua...
4268
   * @pbn_div: divider for DSC mode that takes FEC into account
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4269
   *
eceae1472   Lyude Paul   drm/dp_mst: Start...
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
   * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
   * may have had. Any atomic drivers which support MST must call this function
   * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
   * current VCPI allocation for the new state, but only when
   * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
   * to ensure compatibility with userspace applications that still use the
   * legacy modesetting UAPI.
   *
   * Allocations set by this function are not checked against the bandwidth
   * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
   *
   * Additionally, it is OK to call this function multiple times on the same
   * @port as needed. It is not OK however, to call this function and
   * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
   *
   * See also:
   * drm_dp_atomic_release_vcpi_slots()
   * drm_dp_mst_atomic_check()
   *
   * Returns:
   * Total slots in the atomic state assigned for this port, or a negative error
   * code if the port no longer exists
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4292
4293
4294
   */
  int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
  				  struct drm_dp_mst_topology_mgr *mgr,
1c6c1cb5a   Mikita Lipski   drm/dp_mst: Manua...
4295
4296
  				  struct drm_dp_mst_port *port, int pbn,
  				  int pbn_div)
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4297
4298
  {
  	struct drm_dp_mst_topology_state *topology_state;
eceae1472   Lyude Paul   drm/dp_mst: Start...
4299
  	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
4300
  	int prev_slots, prev_bw, req_slots;
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4301
4302
  
  	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
56a91c493   Ville Syrjälä   drm/dp/mst: Handl...
4303
4304
  	if (IS_ERR(topology_state))
  		return PTR_ERR(topology_state);
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4305

eceae1472   Lyude Paul   drm/dp_mst: Start...
4306
4307
4308
4309
4310
  	/* Find the current allocation for this port, if any */
  	list_for_each_entry(pos, &topology_state->vcpis, next) {
  		if (pos->port == port) {
  			vcpi = pos;
  			prev_slots = vcpi->vcpi;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
4311
  			prev_bw = vcpi->pbn;
eceae1472   Lyude Paul   drm/dp_mst: Start...
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
  
  			/*
  			 * This should never happen, unless the driver tries
  			 * releasing and allocating the same VCPI allocation,
  			 * which is an error
  			 */
  			if (WARN_ON(!prev_slots)) {
  				DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state
  ",
  					  port);
  				return -EINVAL;
  			}
  
  			break;
  		}
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4327
  	}
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
4328
  	if (!vcpi) {
eceae1472   Lyude Paul   drm/dp_mst: Start...
4329
  		prev_slots = 0;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
4330
4331
  		prev_bw = 0;
  	}
eceae1472   Lyude Paul   drm/dp_mst: Start...
4332

1c6c1cb5a   Mikita Lipski   drm/dp_mst: Manua...
4333
4334
  	if (pbn_div <= 0)
  		pbn_div = mgr->pbn_div;
eceae1472   Lyude Paul   drm/dp_mst: Start...
4335

1c6c1cb5a   Mikita Lipski   drm/dp_mst: Manua...
4336
  	req_slots = DIV_ROUND_UP(pbn, pbn_div);
eceae1472   Lyude Paul   drm/dp_mst: Start...
4337
4338
4339
4340
4341
  
  	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d
  ",
  			 port->connector->base.id, port->connector->name,
  			 port, prev_slots, req_slots);
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
4342
4343
4344
4345
  	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d
  ",
  			 port->connector->base.id, port->connector->name,
  			 port, prev_bw, pbn);
eceae1472   Lyude Paul   drm/dp_mst: Start...
4346
4347
4348
4349
  
  	/* Add the new allocation to the state */
  	if (!vcpi) {
  		vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
a3d15c4b0   Lyude Paul   drm/dp_mst: Remov...
4350
4351
  		if (!vcpi)
  			return -ENOMEM;
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4352

eceae1472   Lyude Paul   drm/dp_mst: Start...
4353
4354
4355
4356
4357
  		drm_dp_mst_get_port_malloc(port);
  		vcpi->port = port;
  		list_add(&vcpi->next, &topology_state->vcpis);
  	}
  	vcpi->vcpi = req_slots;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
4358
  	vcpi->pbn = pbn;
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4359

ddd9b54de   Wambui Karuga   drm: remove unnec...
4360
  	return req_slots;
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4361
4362
4363
4364
4365
4366
4367
  }
  EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
  
  /**
   * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
   * @state: global atomic state
   * @mgr: MST topology manager for the port
eceae1472   Lyude Paul   drm/dp_mst: Start...
4368
   * @port: The port to release the VCPI slots from
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4369
   *
eceae1472   Lyude Paul   drm/dp_mst: Start...
4370
4371
4372
   * Releases any VCPI slots that have been allocated to a port in the atomic
   * state. Any atomic drivers which support MST must call this function in
   * their &drm_connector_helper_funcs.atomic_check() callback when the
1e55a53a2   Matt Roper   drm: Trivial comm...
4373
   * connector will no longer have VCPI allocated (e.g. because its CRTC was
eceae1472   Lyude Paul   drm/dp_mst: Start...
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
   * removed) when it had VCPI allocated in the previous atomic state.
   *
   * It is OK to call this even if @port has been removed from the system.
   * Additionally, it is OK to call this function multiple times on the same
   * @port as needed. It is not OK however, to call this function and
   * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
   * phase.
   *
   * See also:
   * drm_dp_atomic_find_vcpi_slots()
   * drm_dp_mst_atomic_check()
   *
   * Returns:
   * 0 if all slots for this port were added back to
   * &drm_dp_mst_topology_state.avail_slots or negative error code
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4389
4390
4391
   */
  int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
  				     struct drm_dp_mst_topology_mgr *mgr,
eceae1472   Lyude Paul   drm/dp_mst: Start...
4392
  				     struct drm_dp_mst_port *port)
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4393
4394
  {
  	struct drm_dp_mst_topology_state *topology_state;
eceae1472   Lyude Paul   drm/dp_mst: Start...
4395
4396
  	struct drm_dp_vcpi_allocation *pos;
  	bool found = false;
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4397
4398
  
  	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
56a91c493   Ville Syrjälä   drm/dp/mst: Handl...
4399
4400
  	if (IS_ERR(topology_state))
  		return PTR_ERR(topology_state);
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4401

eceae1472   Lyude Paul   drm/dp_mst: Start...
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419
  	list_for_each_entry(pos, &topology_state->vcpis, next) {
  		if (pos->port == port) {
  			found = true;
  			break;
  		}
  	}
  	if (WARN_ON(!found)) {
  		DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p
  ",
  			  port, &topology_state->base);
  		return -EINVAL;
  	}
  
  	DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0
  ", port, pos->vcpi);
  	if (pos->vcpi) {
  		drm_dp_mst_put_port_malloc(port);
  		pos->vcpi = 0;
7bfc1fec1   Mikita Lipski   drm/dp_mst: Zero ...
4420
  		pos->pbn = 0;
eceae1472   Lyude Paul   drm/dp_mst: Start...
4421
  	}
edb1ed1ab   Pandiyan, Dhinakaran   drm/dp: Add DP MS...
4422
4423
4424
4425
4426
4427
  
  	return 0;
  }
  EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
  
  /**
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4428
4429
4430
4431
4432
4433
   * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
   * @mgr: manager for this port
   * @port: port to allocate a virtual channel for.
   * @pbn: payload bandwidth number to request
   * @slots: returned number of slots for this PBN.
   */
1e797f556   Pandiyan, Dhinakaran   drm/dp: Split drm...
4434
4435
  bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
  			      struct drm_dp_mst_port *port, int pbn, int slots)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4436
4437
  {
  	int ret;
a34a0a632   Xin Xiong   drm: fix drm_dp_m...
4438
  	if (slots < 0)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4439
  		return false;
a34a0a632   Xin Xiong   drm: fix drm_dp_m...
4440
4441
  	port = drm_dp_mst_topology_get_port_validated(mgr, port);
  	if (!port)
1e797f556   Pandiyan, Dhinakaran   drm/dp: Split drm...
4442
  		return false;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4443
  	if (port->vcpi.vcpi > 0) {
e0ac7113f   Lyude Paul   drm/dp_mst: Fix s...
4444
4445
4446
  		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d
  ",
  			      port->vcpi.vcpi, port->vcpi.pbn, pbn);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4447
  		if (pbn == port->vcpi.pbn) {
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
4448
  			drm_dp_mst_topology_put_port(port);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4449
4450
4451
  			return true;
  		}
  	}
1e797f556   Pandiyan, Dhinakaran   drm/dp: Split drm...
4452
  	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4453
  	if (ret) {
feb2c3bc3   Pandiyan, Dhinakaran   drm/dp: Kill unus...
4454
4455
  		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d
  ",
e0ac7113f   Lyude Paul   drm/dp_mst: Fix s...
4456
  			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
a34a0a632   Xin Xiong   drm: fix drm_dp_m...
4457
  		drm_dp_mst_topology_put_port(port);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4458
4459
  		goto out;
  	}
feb2c3bc3   Pandiyan, Dhinakaran   drm/dp: Kill unus...
4460
4461
  	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d
  ",
e0ac7113f   Lyude Paul   drm/dp_mst: Fix s...
4462
  		      pbn, port->vcpi.num_slots);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4463

1e55a53a2   Matt Roper   drm: Trivial comm...
4464
  	/* Keep port allocated until its payload has been removed */
cfe9f9035   Lyude Paul   drm/dp_mst: Fix p...
4465
  	drm_dp_mst_get_port_malloc(port);
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
4466
  	drm_dp_mst_topology_put_port(port);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4467
4468
4469
4470
4471
  	return true;
  out:
  	return false;
  }
  EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
87f5942d1   Dave Airlie   drm/dp_mst: add a...
4472
4473
4474
  int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
  {
  	int slots = 0;
948de8423   Suraj Upadhyay   drm : Insert blan...
4475

d0757afd0   Lyude Paul   drm/dp_mst: Renam...
4476
  	port = drm_dp_mst_topology_get_port_validated(mgr, port);
87f5942d1   Dave Airlie   drm/dp_mst: add a...
4477
4478
4479
4480
  	if (!port)
  		return slots;
  
  	slots = port->vcpi.num_slots;
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
4481
  	drm_dp_mst_topology_put_port(port);
87f5942d1   Dave Airlie   drm/dp_mst: add a...
4482
4483
4484
  	return slots;
  }
  EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4485
4486
4487
4488
4489
4490
4491
4492
4493
  /**
   * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
   * @mgr: manager for this port
   * @port: unverified pointer to a port.
   *
   * This just resets the number of slots for the ports VCPI for later programming.
   */
  void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
  {
cfe9f9035   Lyude Paul   drm/dp_mst: Fix p...
4494
  	/*
1e55a53a2   Matt Roper   drm: Trivial comm...
4495
  	 * A port with VCPI will remain allocated until its VCPI is
cfe9f9035   Lyude Paul   drm/dp_mst: Fix p...
4496
4497
  	 * released, no verified ref needed
  	 */
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4498
  	port->vcpi.num_slots = 0;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4499
4500
4501
4502
4503
4504
  }
  EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
  
  /**
   * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
   * @mgr: manager for this port
3a8844c29   Lyude Paul   drm/dp_mst: Fix u...
4505
4506
4507
4508
   * @port: port to deallocate vcpi for
   *
   * This can be called unconditionally, regardless of whether
   * drm_dp_mst_allocate_vcpi() succeeded or not.
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4509
   */
4afb8a26b   Lyude Paul   drm/dp_mst: Fix s...
4510
4511
  void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
  				struct drm_dp_mst_port *port)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4512
  {
3a8844c29   Lyude Paul   drm/dp_mst: Fix u...
4513
4514
  	if (!port->vcpi.vcpi)
  		return;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4515
4516
4517
4518
4519
4520
  
  	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
  	port->vcpi.num_slots = 0;
  	port->vcpi.pbn = 0;
  	port->vcpi.aligned_pbn = 0;
  	port->vcpi.vcpi = 0;
cfe9f9035   Lyude Paul   drm/dp_mst: Fix p...
4521
  	drm_dp_mst_put_port_malloc(port);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4522
4523
4524
4525
4526
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
  }
  EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
  
  static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
  				     int id, struct drm_dp_payload *payload)
  {
  	u8 payload_alloc[3], status;
  	int ret;
  	int retries = 0;
  
  	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
  			   DP_PAYLOAD_TABLE_UPDATED);
  
  	payload_alloc[0] = id;
  	payload_alloc[1] = payload->start_slot;
  	payload_alloc[2] = payload->num_slots;
  
  	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
  	if (ret != 3) {
  		DRM_DEBUG_KMS("failed to write payload allocation %d
  ", ret);
  		goto fail;
  	}
  
  retry:
  	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
  	if (ret < 0) {
  		DRM_DEBUG_KMS("failed to read payload table status %d
  ", ret);
  		goto fail;
  	}
  
  	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
  		retries++;
  		if (retries < 20) {
  			usleep_range(10000, 20000);
  			goto retry;
  		}
  		DRM_DEBUG_KMS("status not set after read payload table status %d
  ", status);
  		ret = -EINVAL;
  		goto fail;
  	}
  	ret = 0;
  fail:
  	return ret;
  }
873a95e0d   Lyude Paul   drm/dp_mst: Incre...
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
  static int do_get_act_status(struct drm_dp_aux *aux)
  {
  	int ret;
  	u8 status;
  
  	ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
  	if (ret < 0)
  		return ret;
  
  	return status;
  }
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4580
4581
  
  /**
17e03aa8c   Lyude Paul   drm/dp_mst: Impro...
4582
   * drm_dp_check_act_status() - Polls for ACT handled status.
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4583
4584
   * @mgr: manager to use
   *
17e03aa8c   Lyude Paul   drm/dp_mst: Impro...
4585
   * Tries waiting for the MST hub to finish updating it's payload table by
873a95e0d   Lyude Paul   drm/dp_mst: Incre...
4586
4587
   * polling for the ACT handled bit for up to 3 seconds (yes-some hubs really
   * take that long).
17e03aa8c   Lyude Paul   drm/dp_mst: Impro...
4588
4589
4590
   *
   * Returns:
   * 0 if the ACT was handled in time, negative error code on failure.
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4591
4592
4593
   */
  int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
  {
873a95e0d   Lyude Paul   drm/dp_mst: Incre...
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
  	/*
  	 * There doesn't seem to be any recommended retry count or timeout in
  	 * the MST specification. Since some hubs have been observed to take
  	 * over 1 second to update their payload allocations under certain
  	 * conditions, we use a rather large timeout value.
  	 */
  	const int timeout_ms = 3000;
  	int ret, status;
  
  	ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
  				 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
  				 200, timeout_ms * USEC_PER_MSEC);
  	if (ret < 0 && status >= 0) {
4d1b58d5e   Lyude Paul   drm/dp_mst: Print...
4607
4608
4609
  		DRM_ERROR("Failed to get ACT after %dms, last status: %02x
  ",
  			  timeout_ms, status);
a5cb5fa6c   Lyude Paul   drm/dp_mst: Refor...
4610
  		return -EINVAL;
873a95e0d   Lyude Paul   drm/dp_mst: Incre...
4611
  	} else if (status < 0) {
4d1b58d5e   Lyude Paul   drm/dp_mst: Print...
4612
4613
4614
4615
  		/*
  		 * Failure here isn't unexpected - the hub may have
  		 * just been unplugged
  		 */
873a95e0d   Lyude Paul   drm/dp_mst: Incre...
4616
4617
4618
4619
  		DRM_DEBUG_KMS("Failed to read payload table status: %d
  ",
  			      status);
  		return status;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4620
  	}
873a95e0d   Lyude Paul   drm/dp_mst: Incre...
4621

ad7f8a1f9   Dave Airlie   drm/helper: add D...
4622
  	return 0;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4623
4624
4625
4626
4627
4628
4629
  }
  EXPORT_SYMBOL(drm_dp_check_act_status);
  
  /**
   * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
   * @clock: dot clock for the mode
   * @bpp: bpp for the mode.
dc48529fb   David Francis   drm/dp_mst: Add P...
4630
   * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4631
4632
4633
   *
   * This uses the formula in the spec to calculate the PBN value for a mode.
   */
dc48529fb   David Francis   drm/dp_mst: Add P...
4634
  int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4635
  {
a9ebb3e46   Harry Wentland   drm/dp/mst: Calcu...
4636
4637
4638
4639
4640
4641
4642
4643
4644
  	/*
  	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
  	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
  	 * common multiplier to render an integer PBN for all link rate/lane
  	 * counts combinations
  	 * calculate
  	 * peak_kbps *= (1006/1000)
  	 * peak_kbps *= (64/54)
  	 * peak_kbps *= 8    convert to bytes
dc48529fb   David Francis   drm/dp_mst: Add P...
4645
4646
4647
4648
  	 *
  	 * If the bpp is in units of 1/16, further divide by 16. Put this
  	 * factor in the numerator rather than the denominator to avoid
  	 * integer overflow
a9ebb3e46   Harry Wentland   drm/dp/mst: Calcu...
4649
  	 */
dc48529fb   David Francis   drm/dp_mst: Add P...
4650
4651
4652
4653
  
  	if (dsc)
  		return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
  					8 * 54 * 1000 * 1000);
ed20b7d5c   Ville Syrjälä   drm/dp/mst: Repla...
4654
4655
  	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
  				8 * 54 * 1000 * 1000);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4656
4657
  }
  EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
  /* we want to kick the TX after we've ack the up/down IRQs. */
  static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
  {
  	queue_work(system_long_wq, &mgr->tx_work);
  }
  
  static void drm_dp_mst_dump_mstb(struct seq_file *m,
  				 struct drm_dp_mst_branch *mstb)
  {
  	struct drm_dp_mst_port *port;
  	int tabs = mstb->lct;
  	char prefix[10];
  	int i;
  
  	for (i = 0; i < tabs; i++)
  		prefix[i] = '\t';
  	prefix[i] = '\0';
  
  	seq_printf(m, "%smst: %p, %d
  ", prefix, mstb, mstb->num_ports);
  	list_for_each_entry(port, &mstb->ports, next) {
51108f252   Jim Bride   drm/dp/mst: Enhan...
4679
4680
  		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p
  ", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4681
4682
4683
4684
  		if (port->mstb)
  			drm_dp_mst_dump_mstb(m, port->mstb);
  	}
  }
7056a2bcc   Andy Shevchenko   drm/dp/mst: Fix o...
4685
  #define DP_PAYLOAD_TABLE_SIZE		64
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4686
4687
4688
  static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
  				  char *buf)
  {
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4689
  	int i;
46466b0da   Joe Perches   drm: Use vsnprint...
4690

7056a2bcc   Andy Shevchenko   drm/dp/mst: Fix o...
4691
  	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
46466b0da   Joe Perches   drm: Use vsnprint...
4692
4693
4694
4695
  		if (drm_dp_dpcd_read(mgr->aux,
  				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
  				     &buf[i], 16) != 16)
  			return false;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4696
  	}
46466b0da   Joe Perches   drm: Use vsnprint...
4697
  	return true;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4698
  }
51108f252   Jim Bride   drm/dp/mst: Enhan...
4699
4700
4701
4702
4703
4704
4705
4706
4707
  static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
  			       struct drm_dp_mst_port *port, char *name,
  			       int namelen)
  {
  	struct edid *mst_edid;
  
  	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
  	drm_edid_get_monitor_name(mst_edid, name, namelen);
  }
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4708
4709
4710
4711
4712
4713
4714
4715
4716
4717
4718
4719
  /**
   * drm_dp_mst_dump_topology(): dump topology to seq file.
   * @m: seq_file to dump output to
   * @mgr: manager to dump current topology for.
   *
   * helper to dump MST topology to a seq file for debugfs.
   */
  void drm_dp_mst_dump_topology(struct seq_file *m,
  			      struct drm_dp_mst_topology_mgr *mgr)
  {
  	int i;
  	struct drm_dp_mst_port *port;
51108f252   Jim Bride   drm/dp/mst: Enhan...
4720

ad7f8a1f9   Dave Airlie   drm/helper: add D...
4721
4722
4723
4724
4725
4726
4727
4728
  	mutex_lock(&mgr->lock);
  	if (mgr->mst_primary)
  		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
  
  	/* dump VCPIs */
  	mutex_unlock(&mgr->lock);
  
  	mutex_lock(&mgr->payload_lock);
51108f252   Jim Bride   drm/dp/mst: Enhan...
4729
4730
4731
  	seq_printf(m, "vcpi: %lx %lx %d
  ", mgr->payload_mask, mgr->vcpi_mask,
  		mgr->max_payloads);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4732
4733
4734
  
  	for (i = 0; i < mgr->max_payloads; i++) {
  		if (mgr->proposed_vcpis[i]) {
51108f252   Jim Bride   drm/dp/mst: Enhan...
4735
  			char name[14];
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4736
  			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
51108f252   Jim Bride   drm/dp/mst: Enhan...
4737
4738
4739
4740
4741
4742
  			fetch_monitor_name(mgr, port, name, sizeof(name));
  			seq_printf(m, "vcpi %d: %d %d %d sink name: %s
  ", i,
  				   port->port_num, port->vcpi.vcpi,
  				   port->vcpi.num_slots,
  				   (*name != 0) ? name :  "Unknown");
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4743
  		} else
51108f252   Jim Bride   drm/dp/mst: Enhan...
4744
4745
  			seq_printf(m, "vcpi %d:unused
  ", i);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4746
4747
4748
4749
4750
4751
4752
4753
4754
4755
4756
4757
4758
4759
4760
  	}
  	for (i = 0; i < mgr->max_payloads; i++) {
  		seq_printf(m, "payload %d: %d, %d, %d
  ",
  			   i,
  			   mgr->payloads[i].payload_state,
  			   mgr->payloads[i].start_slot,
  			   mgr->payloads[i].num_slots);
  
  
  	}
  	mutex_unlock(&mgr->payload_lock);
  
  	mutex_lock(&mgr->lock);
  	if (mgr->mst_primary) {
7056a2bcc   Andy Shevchenko   drm/dp/mst: Fix o...
4761
  		u8 buf[DP_PAYLOAD_TABLE_SIZE];
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4762
  		int ret;
46466b0da   Joe Perches   drm: Use vsnprint...
4763

ad7f8a1f9   Dave Airlie   drm/helper: add D...
4764
  		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
4765
4766
4767
4768
4769
  		if (ret) {
  			seq_printf(m, "dpcd read failed
  ");
  			goto out;
  		}
46466b0da   Joe Perches   drm: Use vsnprint...
4770
4771
  		seq_printf(m, "dpcd: %*ph
  ", DP_RECEIVER_CAP_SIZE, buf);
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
4772

ad7f8a1f9   Dave Airlie   drm/helper: add D...
4773
  		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
4774
4775
4776
4777
4778
  		if (ret) {
  			seq_printf(m, "faux/mst read failed
  ");
  			goto out;
  		}
46466b0da   Joe Perches   drm: Use vsnprint...
4779
4780
  		seq_printf(m, "faux/mst: %*ph
  ", 2, buf);
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
4781

ad7f8a1f9   Dave Airlie   drm/helper: add D...
4782
  		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
4783
4784
4785
4786
4787
  		if (ret) {
  			seq_printf(m, "mst ctrl read failed
  ");
  			goto out;
  		}
46466b0da   Joe Perches   drm: Use vsnprint...
4788
4789
  		seq_printf(m, "mst ctrl: %*ph
  ", 1, buf);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4790

44790462d   Dave Airlie   drm/dp/mst: dump ...
4791
4792
  		/* dump the standard OUI branch header */
  		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
4793
4794
4795
4796
4797
  		if (ret) {
  			seq_printf(m, "branch oui read failed
  ");
  			goto out;
  		}
46466b0da   Joe Perches   drm: Use vsnprint...
4798
  		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
4799

51108f252   Jim Bride   drm/dp/mst: Enhan...
4800
  		for (i = 0x3; i < 0x8 && buf[i]; i++)
44790462d   Dave Airlie   drm/dp/mst: dump ...
4801
  			seq_printf(m, "%c", buf[i]);
46466b0da   Joe Perches   drm: Use vsnprint...
4802
4803
4804
4805
  		seq_printf(m, " revision: hw: %x.%x sw: %x.%x
  ",
  			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
  		if (dump_dp_payload_table(mgr, buf))
7056a2bcc   Andy Shevchenko   drm/dp/mst: Fix o...
4806
4807
  			seq_printf(m, "payload table: %*ph
  ", DP_PAYLOAD_TABLE_SIZE, buf);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4808
  	}
cb897542c   Benjamin Gaignard   drm/dp_mst: Fix W...
4809
  out:
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
  	mutex_unlock(&mgr->lock);
  
  }
  EXPORT_SYMBOL(drm_dp_mst_dump_topology);
  
  static void drm_dp_tx_work(struct work_struct *work)
  {
  	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
  
  	mutex_lock(&mgr->qlock);
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
4820
  	if (!list_empty(&mgr->tx_msg_downq))
ad7f8a1f9   Dave Airlie   drm/helper: add D...
4821
4822
4823
  		process_single_down_tx_qlock(mgr);
  	mutex_unlock(&mgr->qlock);
  }
72dc0f515   Lyude Paul   drm/dp_mst: Remov...
4824
4825
  static inline void
  drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
e1ae63b33   Pankaj Bharadiya   drm: Add drm_dp_d...
4826
  {
7d1150760   Imre Deak   drm/dp_mst: Fix t...
4827
  	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
72dc0f515   Lyude Paul   drm/dp_mst: Remov...
4828
  	if (port->connector) {
e1ae63b33   Pankaj Bharadiya   drm: Add drm_dp_d...
4829
4830
4831
  		drm_connector_unregister(port->connector);
  		drm_connector_put(port->connector);
  	}
7cb12d483   Lyude Paul   drm/dp_mst: Destr...
4832

7cb12d483   Lyude Paul   drm/dp_mst: Destr...
4833
4834
4835
4836
4837
4838
4839
  	drm_dp_mst_put_port_malloc(port);
  }
  
  static inline void
  drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
  {
  	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
4840
4841
  	struct drm_dp_mst_port *port, *port_tmp;
  	struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
7cb12d483   Lyude Paul   drm/dp_mst: Destr...
4842
4843
4844
  	bool wake_tx = false;
  
  	mutex_lock(&mgr->lock);
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
4845
  	list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
7cb12d483   Lyude Paul   drm/dp_mst: Destr...
4846
4847
4848
4849
  		list_del(&port->next);
  		drm_dp_mst_topology_put_port(port);
  	}
  	mutex_unlock(&mgr->lock);
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
4850
  	/* drop any tx slot msg */
7cb12d483   Lyude Paul   drm/dp_mst: Destr...
4851
  	mutex_lock(&mstb->mgr->qlock);
d308a881a   Lyude Paul   drm/dp_mst: Kill ...
4852
4853
4854
4855
4856
4857
  	list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
  		if (txmsg->dst != mstb)
  			continue;
  
  		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
  		list_del(&txmsg->next);
7cb12d483   Lyude Paul   drm/dp_mst: Destr...
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
  		wake_tx = true;
  	}
  	mutex_unlock(&mstb->mgr->qlock);
  
  	if (wake_tx)
  		wake_up_all(&mstb->mgr->tx_waitq);
  
  	drm_dp_mst_put_mstb_malloc(mstb);
  }
  
  static void drm_dp_delayed_destroy_work(struct work_struct *work)
  {
  	struct drm_dp_mst_topology_mgr *mgr =
  		container_of(work, struct drm_dp_mst_topology_mgr,
  			     delayed_destroy_work);
  	bool send_hotplug = false, go_again;
6b8eeca65   Dave Airlie   drm/dp/mst: close...
4874
4875
  	/*
  	 * Not a regular list traverse as we have to drop the destroy
7cb12d483   Lyude Paul   drm/dp_mst: Destr...
4876
  	 * connector lock before destroying the mstb/port, to avoid AB->BA
6b8eeca65   Dave Airlie   drm/dp/mst: close...
4877
4878
  	 * ordering between this lock and the config mutex.
  	 */
7cb12d483   Lyude Paul   drm/dp_mst: Destr...
4879
4880
4881
4882
4883
4884
4885
4886
4887
4888
4889
4890
4891
4892
4893
4894
4895
4896
4897
  	do {
  		go_again = false;
  
  		for (;;) {
  			struct drm_dp_mst_branch *mstb;
  
  			mutex_lock(&mgr->delayed_destroy_lock);
  			mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
  							struct drm_dp_mst_branch,
  							destroy_next);
  			if (mstb)
  				list_del(&mstb->destroy_next);
  			mutex_unlock(&mgr->delayed_destroy_lock);
  
  			if (!mstb)
  				break;
  
  			drm_dp_delayed_destroy_mstb(mstb);
  			go_again = true;
6b8eeca65   Dave Airlie   drm/dp/mst: close...
4898
  		}
6b8eeca65   Dave Airlie   drm/dp/mst: close...
4899

7cb12d483   Lyude Paul   drm/dp_mst: Destr...
4900
4901
  		for (;;) {
  			struct drm_dp_mst_port *port;
4772ff03d   Maarten Lankhorst   drm/dp/mst: Remov...
4902

7cb12d483   Lyude Paul   drm/dp_mst: Destr...
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
  			mutex_lock(&mgr->delayed_destroy_lock);
  			port = list_first_entry_or_null(&mgr->destroy_port_list,
  							struct drm_dp_mst_port,
  							next);
  			if (port)
  				list_del(&port->next);
  			mutex_unlock(&mgr->delayed_destroy_lock);
  
  			if (!port)
  				break;
  
  			drm_dp_delayed_destroy_port(port);
  			send_hotplug = true;
  			go_again = true;
  		}
  	} while (go_again);
4772ff03d   Maarten Lankhorst   drm/dp/mst: Remov...
4919

df4839fdc   Dave Airlie   drm/dp/mst: fixup...
4920
  	if (send_hotplug)
16bff572c   Daniel Vetter   drm/dp-mst-helper...
4921
  		drm_kms_helper_hotplug_event(mgr->dev);
6b8eeca65   Dave Airlie   drm/dp/mst: close...
4922
  }
a4370c777   Ville Syrjälä   drm/atomic: Make ...
4923
4924
  static struct drm_private_state *
  drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
4925
  {
eceae1472   Lyude Paul   drm/dp_mst: Start...
4926
4927
4928
  	struct drm_dp_mst_topology_state *state, *old_state =
  		to_dp_mst_topology_state(obj->state);
  	struct drm_dp_vcpi_allocation *pos, *vcpi;
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
4929

eceae1472   Lyude Paul   drm/dp_mst: Start...
4930
  	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
a4370c777   Ville Syrjälä   drm/atomic: Make ...
4931
  	if (!state)
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
4932
  		return NULL;
a4370c777   Ville Syrjälä   drm/atomic: Make ...
4933
  	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
4934

eceae1472   Lyude Paul   drm/dp_mst: Start...
4935
4936
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
4948
  	INIT_LIST_HEAD(&state->vcpis);
  
  	list_for_each_entry(pos, &old_state->vcpis, next) {
  		/* Prune leftover freed VCPI allocations */
  		if (!pos->vcpi)
  			continue;
  
  		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
  		if (!vcpi)
  			goto fail;
  
  		drm_dp_mst_get_port_malloc(vcpi->port);
  		list_add(&vcpi->next, &state->vcpis);
  	}
a4370c777   Ville Syrjälä   drm/atomic: Make ...
4949
  	return &state->base;
eceae1472   Lyude Paul   drm/dp_mst: Start...
4950
4951
4952
4953
4954
4955
4956
4957
4958
  
  fail:
  	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
  		drm_dp_mst_put_port_malloc(pos->port);
  		kfree(pos);
  	}
  	kfree(state);
  
  	return NULL;
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
4959
  }
a4370c777   Ville Syrjälä   drm/atomic: Make ...
4960
4961
  static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
  				     struct drm_private_state *state)
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
4962
  {
a4370c777   Ville Syrjälä   drm/atomic: Make ...
4963
4964
  	struct drm_dp_mst_topology_state *mst_state =
  		to_dp_mst_topology_state(state);
eceae1472   Lyude Paul   drm/dp_mst: Start...
4965
4966
4967
4968
4969
4970
4971
4972
  	struct drm_dp_vcpi_allocation *pos, *tmp;
  
  	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
  		/* We only keep references to ports with non-zero VCPIs */
  		if (pos->vcpi)
  			drm_dp_mst_put_port_malloc(pos->port);
  		kfree(pos);
  	}
a4370c777   Ville Syrjälä   drm/atomic: Make ...
4973
4974
  
  	kfree(mst_state);
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
4975
  }
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
4976
4977
4978
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988
4989
  static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
  						 struct drm_dp_mst_branch *branch)
  {
  	while (port->parent) {
  		if (port->parent == branch)
  			return true;
  
  		if (port->parent->port_parent)
  			port = port->parent->port_parent;
  		else
  			break;
  	}
  	return false;
  }
047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
4990
4991
4992
4993
4994
4995
4996
  static int
  drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
  				      struct drm_dp_mst_topology_state *state);
  
  static int
  drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
  				      struct drm_dp_mst_topology_state *state)
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
4997
  {
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
4998
  	struct drm_dp_vcpi_allocation *vcpi;
047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
4999
5000
5001
  	struct drm_dp_mst_port *port;
  	int pbn_used = 0, ret;
  	bool found = false;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
5002

047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
5003
5004
5005
5006
5007
5008
5009
  	/* Check that we have at least one port in our state that's downstream
  	 * of this branch, otherwise we can skip this branch
  	 */
  	list_for_each_entry(vcpi, &state->vcpis, next) {
  		if (!vcpi->pbn ||
  		    !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
  			continue;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
5010

047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
5011
5012
  		found = true;
  		break;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
5013
  	}
047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
5014
5015
  	if (!found)
  		return 0;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
5016

047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026
5027
5028
5029
5030
  	if (mstb->port_parent)
  		DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]
  ",
  				 mstb->port_parent->parent, mstb->port_parent,
  				 mstb);
  	else
  		DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits
  ",
  				 mstb);
  
  	list_for_each_entry(port, &mstb->ports, next) {
  		ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
  		if (ret < 0)
  			return ret;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
5031

047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
5032
  		pbn_used += ret;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
5033
  	}
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
5034

047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
5035
5036
5037
5038
5039
5040
5041
5042
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
5060
5061
  	return pbn_used;
  }
  
  static int
  drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
  				      struct drm_dp_mst_topology_state *state)
  {
  	struct drm_dp_vcpi_allocation *vcpi;
  	int pbn_used = 0;
  
  	if (port->pdt == DP_PEER_DEVICE_NONE)
  		return 0;
  
  	if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
  		bool found = false;
  
  		list_for_each_entry(vcpi, &state->vcpis, next) {
  			if (vcpi->port != port)
  				continue;
  			if (!vcpi->pbn)
  				return 0;
  
  			found = true;
  			break;
  		}
  		if (!found)
  			return 0;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
5062

047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
  		/* This should never happen, as it means we tried to
  		 * set a mode before querying the full_pbn
  		 */
  		if (WARN_ON(!port->full_pbn))
  			return -EINVAL;
  
  		pbn_used = vcpi->pbn;
  	} else {
  		pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
  								 state);
  		if (pbn_used <= 0)
  			return pbn_used;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
5075
  	}
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
5076

047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
5077
5078
5079
5080
5081
  	if (pbn_used > port->full_pbn) {
  		DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d
  ",
  				 port->parent, port, pbn_used,
  				 port->full_pbn);
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
5082
5083
  		return -ENOSPC;
  	}
047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
5084
5085
5086
5087
5088
5089
  
  	DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN
  ",
  			 port->parent, port, pbn_used, port->full_pbn);
  
  	return pbn_used;
cd82d82cb   Mikita Lipski   drm/dp_mst: Add b...
5090
  }
eceae1472   Lyude Paul   drm/dp_mst: Start...
5091
  static inline int
9e5b95903   Mikita Lipski   drm/dp_mst: Renam...
5092
5093
  drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
  					 struct drm_dp_mst_topology_state *mst_state)
eceae1472   Lyude Paul   drm/dp_mst: Start...
5094
5095
  {
  	struct drm_dp_vcpi_allocation *vcpi;
5e187a014   Lyude Paul   drm/dp_mst: Check...
5096
  	int avail_slots = 63, payload_count = 0;
eceae1472   Lyude Paul   drm/dp_mst: Start...
5097
5098
5099
5100
5101
5102
5103
5104
5105
5106
5107
5108
5109
5110
5111
5112
5113
5114
5115
5116
5117
5118
  
  	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
  		/* Releasing VCPI is always OK-even if the port is gone */
  		if (!vcpi->vcpi) {
  			DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots
  ",
  					 vcpi->port);
  			continue;
  		}
  
  		DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots
  ",
  				 vcpi->port, vcpi->vcpi);
  
  		avail_slots -= vcpi->vcpi;
  		if (avail_slots < 0) {
  			DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)
  ",
  					 vcpi->port, mst_state,
  					 avail_slots + vcpi->vcpi);
  			return -ENOSPC;
  		}
5e187a014   Lyude Paul   drm/dp_mst: Check...
5119
5120
5121
5122
5123
5124
5125
  
  		if (++payload_count > mgr->max_payloads) {
  			DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)
  ",
  					 mgr, mst_state, mgr->max_payloads);
  			return -EINVAL;
  		}
eceae1472   Lyude Paul   drm/dp_mst: Start...
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135
  	}
  	DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d
  ",
  			 mgr, mst_state, avail_slots,
  			 63 - avail_slots);
  
  	return 0;
  }
  
  /**
8ec046716   Mikita Lipski   drm/dp_mst: Add h...
5136
5137
   * drm_dp_mst_add_affected_dsc_crtcs
   * @state: Pointer to the new struct drm_dp_mst_topology_state
9edb435ae   Alex Deucher   drm/dp_mst: fix d...
5138
   * @mgr: MST topology manager
8ec046716   Mikita Lipski   drm/dp_mst: Add h...
5139
5140
5141
5142
5143
5144
5145
5146
5147
5148
5149
5150
5151
5152
5153
5154
5155
5156
5157
5158
5159
5160
5161
5162
5163
5164
5165
5166
5167
5168
5169
5170
5171
5172
5173
5174
   *
   * Whenever there is a change in mst topology
   * DSC configuration would have to be recalculated
   * therefore we need to trigger modeset on all affected
   * CRTCs in that topology
   *
   * See also:
   * drm_dp_mst_atomic_enable_dsc()
   */
  int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
  {
  	struct drm_dp_mst_topology_state *mst_state;
  	struct drm_dp_vcpi_allocation *pos;
  	struct drm_connector *connector;
  	struct drm_connector_state *conn_state;
  	struct drm_crtc *crtc;
  	struct drm_crtc_state *crtc_state;
  
  	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
  
  	if (IS_ERR(mst_state))
  		return -EINVAL;
  
  	list_for_each_entry(pos, &mst_state->vcpis, next) {
  
  		connector = pos->port->connector;
  
  		if (!connector)
  			return -EINVAL;
  
  		conn_state = drm_atomic_get_connector_state(state, connector);
  
  		if (IS_ERR(conn_state))
  			return PTR_ERR(conn_state);
  
  		crtc = conn_state->crtc;
88fee1c90   Bhawanpreet Lakha   drm/dp_mst: Don't...
5175
5176
  		if (!crtc)
  			continue;
8ec046716   Mikita Lipski   drm/dp_mst: Add h...
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195
5196
  
  		if (!drm_dp_mst_dsc_aux_for_port(pos->port))
  			continue;
  
  		crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
  
  		if (IS_ERR(crtc_state))
  			return PTR_ERR(crtc_state);
  
  		DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p
  ",
  				 mgr, crtc);
  
  		crtc_state->mode_changed = true;
  	}
  	return 0;
  }
  EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
  
  /**
8afb7e6af   Mikita Lipski   drm/dp_mst: Add D...
5197
5198
5199
5200
5201
5202
5203
5204
5205
5206
5207
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235
5236
5237
5238
5239
5240
5241
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252
5253
5254
5255
5256
5257
5258
5259
5260
   * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
   * @state: Pointer to the new drm_atomic_state
   * @port: Pointer to the affected MST Port
   * @pbn: Newly recalculated bw required for link with DSC enabled
   * @pbn_div: Divider to calculate correct number of pbn per slot
   * @enable: Boolean flag to enable or disable DSC on the port
   *
   * This function enables DSC on the given Port
   * by recalculating its vcpi from pbn provided
   * and sets dsc_enable flag to keep track of which
   * ports have DSC enabled
   *
   */
  int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
  				 struct drm_dp_mst_port *port,
  				 int pbn, int pbn_div,
  				 bool enable)
  {
  	struct drm_dp_mst_topology_state *mst_state;
  	struct drm_dp_vcpi_allocation *pos;
  	bool found = false;
  	int vcpi = 0;
  
  	mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
  
  	if (IS_ERR(mst_state))
  		return PTR_ERR(mst_state);
  
  	list_for_each_entry(pos, &mst_state->vcpis, next) {
  		if (pos->port == port) {
  			found = true;
  			break;
  		}
  	}
  
  	if (!found) {
  		DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p
  ",
  				 port, mst_state);
  		return -EINVAL;
  	}
  
  	if (pos->dsc_enabled == enable) {
  		DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots
  ",
  				 port, enable, pos->vcpi);
  		vcpi = pos->vcpi;
  	}
  
  	if (enable) {
  		vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
  		DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port
  ",
  				 port, vcpi);
  		if (vcpi < 0)
  			return -EINVAL;
  	}
  
  	pos->dsc_enabled = enable;
  
  	return vcpi;
  }
  EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
  /**
eceae1472   Lyude Paul   drm/dp_mst: Start...
5261
5262
5263
5264
5265
5266
5267
5268
5269
5270
5271
5272
5273
5274
5275
5276
5277
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
   * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
   * atomic update is valid
   * @state: Pointer to the new &struct drm_dp_mst_topology_state
   *
   * Checks the given topology state for an atomic update to ensure that it's
   * valid. This includes checking whether there's enough bandwidth to support
   * the new VCPI allocations in the atomic update.
   *
   * Any atomic drivers supporting DP MST must make sure to call this after
   * checking the rest of their state in their
   * &drm_mode_config_funcs.atomic_check() callback.
   *
   * See also:
   * drm_dp_atomic_find_vcpi_slots()
   * drm_dp_atomic_release_vcpi_slots()
   *
   * Returns:
   *
   * 0 if the new state is valid, negative error code otherwise.
   */
  int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
  {
  	struct drm_dp_mst_topology_mgr *mgr;
  	struct drm_dp_mst_topology_state *mst_state;
  	int i, ret = 0;
  
  	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
7b1991438   José Roberto de Souza   drm/mst: Don't do...
5288
5289
  		if (!mgr->mst_state)
  			continue;
9e5b95903   Mikita Lipski   drm/dp_mst: Renam...
5290
  		ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
eceae1472   Lyude Paul   drm/dp_mst: Start...
5291
5292
  		if (ret)
  			break;
047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
5293
5294
5295
5296
5297
5298
  
  		mutex_lock(&mgr->lock);
  		ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
  							    mst_state);
  		mutex_unlock(&mgr->lock);
  		if (ret < 0)
eceae1472   Lyude Paul   drm/dp_mst: Start...
5299
  			break;
047d4cd20   Lyude Paul   drm/dp_mst: Rewri...
5300
5301
  		else
  			ret = 0;
eceae1472   Lyude Paul   drm/dp_mst: Start...
5302
5303
5304
5305
5306
  	}
  
  	return ret;
  }
  EXPORT_SYMBOL(drm_dp_mst_atomic_check);
bea5c38f1   Lyude Paul   drm/dp_mst: Add s...
5307
  const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
a4370c777   Ville Syrjälä   drm/atomic: Make ...
5308
5309
  	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
  	.atomic_destroy_state = drm_dp_mst_destroy_state,
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
5310
  };
bea5c38f1   Lyude Paul   drm/dp_mst: Add s...
5311
  EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
5312
5313
5314
5315
5316
5317
5318
5319
5320
5321
5322
5323
5324
5325
5326
5327
5328
5329
5330
  
  /**
   * drm_atomic_get_mst_topology_state: get MST topology state
   *
   * @state: global atomic state
   * @mgr: MST topology manager, also the private object in this case
   *
   * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
   * state vtable so that the private object state returned is that of a MST
   * topology object. Also, drm_atomic_get_private_obj_state() expects the caller
   * to care of the locking, so warn if don't hold the connection_mutex.
   *
   * RETURNS:
   *
   * The MST topology state or error pointer.
   */
  struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
  								    struct drm_dp_mst_topology_mgr *mgr)
  {
a4370c777   Ville Syrjälä   drm/atomic: Make ...
5331
  	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
5332
5333
  }
  EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5334
5335
5336
5337
5338
5339
5340
5341
5342
5343
5344
5345
  /**
   * drm_dp_mst_topology_mgr_init - initialise a topology manager
   * @mgr: manager struct to initialise
   * @dev: device providing this structure - for i2c addition.
   * @aux: DP helper aux channel to talk to this device
   * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
   * @max_payloads: maximum number of payloads this GPU can source
   * @conn_base_id: the connector object ID the MST device is connected to.
   *
   * Return 0 for success, or negative error code on failure
   */
  int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
7b0a89a6d   Dhinakaran Pandiyan   drm/dp: Store drm...
5346
  				 struct drm_device *dev, struct drm_dp_aux *aux,
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5347
5348
5349
  				 int max_dpcd_transaction_bytes,
  				 int max_payloads, int conn_base_id)
  {
a4370c777   Ville Syrjälä   drm/atomic: Make ...
5350
  	struct drm_dp_mst_topology_state *mst_state;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5351
5352
5353
  	mutex_init(&mgr->lock);
  	mutex_init(&mgr->qlock);
  	mutex_init(&mgr->payload_lock);
7cb12d483   Lyude Paul   drm/dp_mst: Destr...
5354
  	mutex_init(&mgr->delayed_destroy_lock);
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
5355
  	mutex_init(&mgr->up_req_lock);
14692a363   Lyude Paul   drm/dp_mst: Add p...
5356
  	mutex_init(&mgr->probe_lock);
12a280c72   Lyude Paul   drm/dp_mst: Add t...
5357
5358
5359
  #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
  	mutex_init(&mgr->topology_ref_history_lock);
  #endif
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5360
  	INIT_LIST_HEAD(&mgr->tx_msg_downq);
7cb12d483   Lyude Paul   drm/dp_mst: Destr...
5361
5362
  	INIT_LIST_HEAD(&mgr->destroy_port_list);
  	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
5363
  	INIT_LIST_HEAD(&mgr->up_req_list);
72822c3bf   Imre Deak   drm/dp_mst: Fix f...
5364
5365
5366
5367
5368
5369
5370
5371
  
  	/*
  	 * delayed_destroy_work will be queued on a dedicated WQ, so that any
  	 * requeuing will be also flushed when deiniting the topology manager.
  	 */
  	mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
  	if (mgr->delayed_destroy_wq == NULL)
  		return -ENOMEM;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5372
5373
  	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
  	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
7cb12d483   Lyude Paul   drm/dp_mst: Destr...
5374
  	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
5375
  	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5376
5377
5378
5379
5380
5381
  	init_waitqueue_head(&mgr->tx_waitq);
  	mgr->dev = dev;
  	mgr->aux = aux;
  	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
  	mgr->max_payloads = max_payloads;
  	mgr->conn_base_id = conn_base_id;
4d6a10da7   Imre Deak   drm/mst: Add rang...
5382
5383
5384
  	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
  	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
  		return -EINVAL;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5385
5386
5387
5388
5389
5390
5391
  	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
  	if (!mgr->payloads)
  		return -ENOMEM;
  	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
  	if (!mgr->proposed_vcpis)
  		return -ENOMEM;
  	set_bit(0, &mgr->payload_mask);
441388a8a   Imre Deak   drm/mst: Don't ig...
5392

a4370c777   Ville Syrjälä   drm/atomic: Make ...
5393
5394
  	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
  	if (mst_state == NULL)
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
5395
  		return -ENOMEM;
a4370c777   Ville Syrjälä   drm/atomic: Make ...
5396
5397
  
  	mst_state->mgr = mgr;
eceae1472   Lyude Paul   drm/dp_mst: Start...
5398
  	INIT_LIST_HEAD(&mst_state->vcpis);
a4370c777   Ville Syrjälä   drm/atomic: Make ...
5399

b962a1205   Rob Clark   drm/atomic: integ...
5400
  	drm_atomic_private_obj_init(dev, &mgr->base,
a4370c777   Ville Syrjälä   drm/atomic: Make ...
5401
  				    &mst_state->base,
bea5c38f1   Lyude Paul   drm/dp_mst: Add s...
5402
  				    &drm_dp_mst_topology_state_funcs);
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
5403

ad7f8a1f9   Dave Airlie   drm/helper: add D...
5404
5405
5406
5407
5408
5409
5410
5411
5412
5413
  	return 0;
  }
  EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
  
  /**
   * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
   * @mgr: manager to destroy
   */
  void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
  {
f536e00c4   Lyude Paul   drm/dp_mst: Fix m...
5414
  	drm_dp_mst_topology_mgr_set_mst(mgr, false);
274d83524   Dave Airlie   drm/dp/mst: drop ...
5415
  	flush_work(&mgr->work);
72822c3bf   Imre Deak   drm/dp_mst: Fix f...
5416
5417
5418
5419
5420
  	/* The following will also drain any requeued work on the WQ. */
  	if (mgr->delayed_destroy_wq) {
  		destroy_workqueue(mgr->delayed_destroy_wq);
  		mgr->delayed_destroy_wq = NULL;
  	}
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5421
5422
5423
5424
5425
5426
5427
5428
  	mutex_lock(&mgr->payload_lock);
  	kfree(mgr->payloads);
  	mgr->payloads = NULL;
  	kfree(mgr->proposed_vcpis);
  	mgr->proposed_vcpis = NULL;
  	mutex_unlock(&mgr->payload_lock);
  	mgr->dev = NULL;
  	mgr->aux = NULL;
a4370c777   Ville Syrjälä   drm/atomic: Make ...
5429
  	drm_atomic_private_obj_fini(&mgr->base);
3f3353b7e   Pandiyan, Dhinakaran   drm/dp: Introduce...
5430
  	mgr->funcs = NULL;
50094b5dc   Lyude Paul   drm/dp_mst: Destr...
5431

7cb12d483   Lyude Paul   drm/dp_mst: Destr...
5432
  	mutex_destroy(&mgr->delayed_destroy_lock);
50094b5dc   Lyude Paul   drm/dp_mst: Destr...
5433
5434
5435
  	mutex_destroy(&mgr->payload_lock);
  	mutex_destroy(&mgr->qlock);
  	mutex_destroy(&mgr->lock);
9408cc94e   Lyude Paul   drm/dp_mst: Handl...
5436
  	mutex_destroy(&mgr->up_req_lock);
14692a363   Lyude Paul   drm/dp_mst: Add p...
5437
  	mutex_destroy(&mgr->probe_lock);
12a280c72   Lyude Paul   drm/dp_mst: Add t...
5438
5439
5440
  #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
  	mutex_destroy(&mgr->topology_ref_history_lock);
  #endif
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5441
5442
  }
  EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
cb8ce7111   Ville Syrjälä   drm/dp/mst: Valid...
5443
5444
5445
5446
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456
5457
5458
  static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
  {
  	int i;
  
  	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
  		return false;
  
  	for (i = 0; i < num - 1; i++) {
  		if (msgs[i].flags & I2C_M_RD ||
  		    msgs[i].len > 0xff)
  			return false;
  	}
  
  	return msgs[num - 1].flags & I2C_M_RD &&
  		msgs[num - 1].len <= 0xff;
  }
adb48b269   Sam McNally   drm/dp_mst: Suppo...
5459
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474
  static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
  {
  	int i;
  
  	for (i = 0; i < num - 1; i++) {
  		if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
  		    msgs[i].len > 0xff)
  			return false;
  	}
  
  	return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
  }
  
/*
 * Perform an I2C read on @port by tunnelling the whole transfer through a
 * single DP_REMOTE_I2C_READ sideband message sent to branch device @mstb.
 *
 * msgs[0..num-2] become the request's write transactions (address/register
 * setup) and msgs[num-1] is the read whose result is copied back into its
 * buffer. The caller must have validated the message list with
 * remote_i2c_read_ok() first (so num >= 1 and the shape above holds).
 *
 * Returns @num on success, or a negative error code: -ENOMEM on allocation
 * failure, -EREMOTEIO if the branch NAKs the request, -EIO if the reply's
 * byte count does not match the requested read length, or the error from
 * drm_dp_mst_wait_tx_reply().
 */
static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_mst_port *port,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	/* All leading messages are encoded as write transactions. */
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
	}
	/* The last message describes the actual read. */
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		/* The sink must return exactly the number of bytes asked for. */
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	return ret;
}
  
/*
 * Perform an I2C write on @port by sending one DP_REMOTE_I2C_WRITE sideband
 * message per i2c_msg to branch device @mstb, reusing a single tx buffer.
 *
 * The caller must have validated the message list with
 * remote_i2c_write_ok() first (all messages are writes of <= 0xff bytes).
 *
 * Returns @num on success, -ENOMEM on allocation failure, -EREMOTEIO if the
 * branch NAKs any request, or the error from drm_dp_mst_wait_tx_reply().
 * Note: on failure partway through, earlier messages have already been sent.
 */
static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
				struct drm_dp_mst_port *port,
				struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < num; i++) {
		memset(&msg, 0, sizeof(msg));
		msg.req_type = DP_REMOTE_I2C_WRITE;
		msg.u.i2c_write.port_number = port->port_num;
		msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
		msg.u.i2c_write.num_bytes = msgs[i].len;
		msg.u.i2c_write.bytes = msgs[i].buf;

		/* Reset the tx message so it can carry the next request. */
		memset(txmsg, 0, sizeof(*txmsg));
		txmsg->dst = mstb;

		drm_dp_encode_sideband_req(&msg, txmsg);
		drm_dp_queue_down_tx(mgr, txmsg);

		ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (ret > 0) {
			if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
				ret = -EREMOTEIO;
				goto out;
			}
		} else {
			goto out;
		}
	}
	ret = num;
out:
	kfree(txmsg);
	return ret;
}
  
  /* I2C device */
  static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
  			       struct i2c_msg *msgs, int num)
  {
  	struct drm_dp_aux *aux = adapter->algo_data;
  	struct drm_dp_mst_port *port =
  		container_of(aux, struct drm_dp_mst_port, aux);
  	struct drm_dp_mst_branch *mstb;
  	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
  	int ret;
  
  	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
  	if (!mstb)
  		return -EREMOTEIO;
  
  	if (remote_i2c_read_ok(msgs, num)) {
  		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
  	} else if (remote_i2c_write_ok(msgs, num)) {
  		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
  	} else {
  		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device
  ");
  		ret = -EIO;
  	}
d0757afd0   Lyude Paul   drm/dp_mst: Renam...
5592
  	drm_dp_mst_topology_put_mstb(mstb);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5593
5594
5595
5596
5597
5598
5599
5600
5601
5602
5603
5604
5605
5606
5607
5608
5609
5610
  	return ret;
  }
  
  static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
  {
  	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
  	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
  	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
  	       I2C_FUNC_10BIT_ADDR;
  }
  
/* i2c_algorithm vtable backing each MST port's I2C-over-AUX adapter. */
static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};
  
  /**
   * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
d8bd15b37   Imre Deak   drm/dp_mst: Fix t...
5611
   * @port: The port to add the I2C bus on
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5612
5613
5614
   *
   * Returns 0 on success or a negative error code on failure.
   */
d8bd15b37   Imre Deak   drm/dp_mst: Fix t...
5615
  static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5616
  {
d8bd15b37   Imre Deak   drm/dp_mst: Fix t...
5617
5618
  	struct drm_dp_aux *aux = &port->aux;
  	struct device *parent_dev = port->mgr->dev->dev;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5619
5620
5621
5622
5623
5624
  	aux->ddc.algo = &drm_dp_mst_i2c_algo;
  	aux->ddc.algo_data = aux;
  	aux->ddc.retries = 3;
  
  	aux->ddc.class = I2C_CLASS_DDC;
  	aux->ddc.owner = THIS_MODULE;
d8bd15b37   Imre Deak   drm/dp_mst: Fix t...
5625
5626
5627
  	/* FIXME: set the kdev of the port's connector as parent */
  	aux->ddc.dev.parent = parent_dev;
  	aux->ddc.dev.of_node = parent_dev->of_node;
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5628

d8bd15b37   Imre Deak   drm/dp_mst: Fix t...
5629
  	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5630
5631
5632
5633
5634
5635
5636
  		sizeof(aux->ddc.name));
  
  	return i2c_add_adapter(&aux->ddc);
  }
  
  /**
   * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
d8bd15b37   Imre Deak   drm/dp_mst: Fix t...
5637
   * @port: The port to remove the I2C bus from
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5638
   */
d8bd15b37   Imre Deak   drm/dp_mst: Fix t...
5639
  static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5640
  {
d8bd15b37   Imre Deak   drm/dp_mst: Fix t...
5641
  	i2c_del_adapter(&port->aux.ddc);
ad7f8a1f9   Dave Airlie   drm/helper: add D...
5642
  }
c2bc1b6ea   David Francis   drm/dp_mst: Add h...
5643
5644
5645
5646
5647
5648
5649
5650
5651
5652
5653
5654
5655
5656
5657
5658
5659
5660
5661
5662
5663
5664
5665
5666
5667
5668
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680
5681
5682
5683
5684
5685
5686
5687
5688
5689
5690
5691
5692
5693
5694
5695
5696
5697
5698
5699
5700
5701
5702
5703
5704
5705
5706
5707
5708
5709
5710
5711
5712
5713
5714
  
  /**
   * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
   * @port: The port to check
   *
   * A single physical MST hub object can be represented in the topology
   * by multiple branches, with virtual ports between those branches.
   *
   * As of DP1.4, An MST hub with internal (virtual) ports must expose
   * certain DPCD registers over those ports. See sections 2.6.1.1.1
   * and 2.6.1.1.2 of Display Port specification v1.4 for details.
   *
   * May acquire mgr->lock
   *
   * Returns:
   * true if the port is a virtual DP peer device, false otherwise
   */
  static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
  {
  	struct drm_dp_mst_port *downstream_port;
  
  	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
  		return false;
  
  	/* Virtual DP Sink (Internal Display Panel) */
  	if (port->port_num >= 8)
  		return true;
  
  	/* DP-to-HDMI Protocol Converter */
  	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
  	    !port->mcs &&
  	    port->ldps)
  		return true;
  
  	/* DP-to-DP */
  	mutex_lock(&port->mgr->lock);
  	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
  	    port->mstb &&
  	    port->mstb->num_ports == 2) {
  		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
  			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
  			    !downstream_port->input) {
  				mutex_unlock(&port->mgr->lock);
  				return true;
  			}
  		}
  	}
  	mutex_unlock(&port->mgr->lock);
  
  	return false;
  }
  
/**
 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
 * @port: The port to check. A leaf of the MST tree with an attached display.
 *
 * Depending on the situation, DSC may be enabled via the endpoint aux,
 * the immediately upstream aux, or the connector's physical aux.
 *
 * This is both the correct aux to read DSC_CAPABILITY and the
 * correct aux to write DSC_ENABLED.
 *
 * This operation can be expensive (up to four aux reads), so
 * the caller should cache the return.
 *
 * Returns:
 * NULL if DSC cannot be enabled on this port, otherwise the aux device
 */
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *immediate_upstream_port;
	struct drm_dp_mst_port *fec_port;
	struct drm_dp_desc desc = {};
	u8 endpoint_fec;
	u8 endpoint_dsc;

	if (!port)
		return NULL;

	/* The upstream port is virtual unless the parent is the primary branch. */
	if (port->parent->port_parent)
		immediate_upstream_port = port->parent->port_parent;
	else
		immediate_upstream_port = NULL;

	fec_port = immediate_upstream_port;
	while (fec_port) {
		/*
		 * Each physical link (i.e. not a virtual port) between the
		 * output and the primary device must support FEC
		 */
		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
		    !fec_port->fec_capable)
			return NULL;

		fec_port = fec_port->parent->port_parent;
	}

	/* DP-to-DP peer device */
	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
		u8 upstream_dsc;

		if (drm_dp_dpcd_read(&port->aux,
				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&port->aux,
				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
			return NULL;

		/* Endpoint decompression with DP-to-DP peer device */
		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
		    (endpoint_fec & DP_FEC_CAPABLE) &&
		    (upstream_dsc & 0x2) /* DSC passthrough */)
			return &port->aux;

		/* Virtual DPCD decompression with DP-to-DP peer device */
		return &immediate_upstream_port->aux;
	}

	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
	if (drm_dp_mst_is_virtual_dpcd(port))
		return &port->aux;

	/*
	 * Synaptics quirk
	 * Applies to ports for which:
	 * - Physical aux has Synaptics OUI
	 * - DPv1.4 or higher
	 * - Port is on primary branch device
	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
	 */
	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
		return NULL;

	if (drm_dp_has_quirk(&desc, 0,
			     DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
	    port->parent == port->mgr->mst_primary) {
		u8 downstreamport;

		if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
				     &downstreamport, 1) < 0)
			return NULL;

		/* Quirk applies only to real (non-VGA) downstream ports. */
		if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
		   ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
			return port->mgr->aux;
	}

	/*
	 * The check below verifies if the MST sink
	 * connected to the GPU is capable of DSC -
	 * therefore the endpoint needs to be
	 * both DSC and FEC capable.
	 */
	if (drm_dp_dpcd_read(&port->aux,
	   DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
		return NULL;
	if (drm_dp_dpcd_read(&port->aux,
	   DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
		return NULL;
	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
	   (endpoint_fec & DP_FEC_CAPABLE))
		return &port->aux;

	return NULL;
}
EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);