drivers/misc/sgi-gru/grufault.c

  // SPDX-License-Identifier: GPL-2.0-or-later
  /*
   * SN Platform GRU Driver
   *
   *              FAULT HANDLER FOR GRU DETECTED TLB MISSES
   *
   * This file contains code that handles TLB misses within the GRU.
   * These misses are reported either via interrupts or user polling of
   * the user CB.
   *
   *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
   */
  
  #include <linux/kernel.h>
  #include <linux/errno.h>
  #include <linux/spinlock.h>
  #include <linux/mm.h>
  #include <linux/hugetlb.h>
  #include <linux/device.h>
  #include <linux/io.h>
  #include <linux/uaccess.h>
  #include <linux/security.h>
  #include <linux/prefetch.h>
  #include <asm/pgtable.h>
  #include "gru.h"
  #include "grutables.h"
  #include "grulib.h"
  #include "gru_instructions.h"
  #include <asm/uv/uv_hub.h>
  /* Return codes for vtop functions */
  #define VTOP_SUCCESS               0
  #define VTOP_INVALID               -1
  #define VTOP_RETRY                 -2
  /*
   * Test if a physical address is a valid GRU GSEG address
   */
  static inline int is_gru_paddr(unsigned long paddr)
  {
  	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
  }
  
  /*
   * Find the vma of a GRU segment. Caller must hold mmap_sem.
   */
  struct vm_area_struct *gru_find_vma(unsigned long vaddr)
  {
  	struct vm_area_struct *vma;
  
  	vma = find_vma(current->mm, vaddr);
  	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
  		return vma;
  	return NULL;
  }
  
  /*
   * Find and lock the gts that contains the specified user vaddr.
   *
   * Returns:
   * 	- *gts with the mmap_sem locked for read and the GTS locked.
   *	- NULL if vaddr invalid OR is not a valid GSEG vaddr.
   */
  
  static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
  {
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
  	struct gru_thread_state *gts = NULL;
  
  	down_read(&mm->mmap_sem);
  	vma = gru_find_vma(vaddr);
  	if (vma)
  		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
  	if (gts)
  		mutex_lock(&gts->ts_ctxlock);
  	else
  		up_read(&mm->mmap_sem);
  	return gts;
  }
  
  static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
  {
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
  	struct gru_thread_state *gts = ERR_PTR(-EINVAL);
  
  	down_write(&mm->mmap_sem);
  	vma = gru_find_vma(vaddr);
  	if (!vma)
  		goto err;
  
  	gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
  	if (IS_ERR(gts))
  		goto err;
  	mutex_lock(&gts->ts_ctxlock);
  	downgrade_write(&mm->mmap_sem);
  	return gts;

  err:
  	up_write(&mm->mmap_sem);
  	return gts;
  }
  
  /*
   * Unlock a GTS that was previously locked with gru_find_lock_gts().
   */
  static void gru_unlock_gts(struct gru_thread_state *gts)
  {
  	mutex_unlock(&gts->ts_ctxlock);
  	up_read(&current->mm->mmap_sem);
  }
  
  /*
   * Set a CB.istatus to active using a user virtual address. This must be done
   * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
   * If the line is evicted, the status may be lost. The in-cache update
   * is necessary to prevent the user from seeing a stale cb.istatus that will
   * change as soon as the TFH restart is complete. Races may cause an
   * occasional failure to clear the cb.istatus, but that is ok.
   */
  static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
  {
  	if (cbk) {
  		cbk->istatus = CBS_ACTIVE;
  	}
  }
  
  /*
   * Read & clear a TFM
   *
   * The GRU has an array of fault maps. A map is private to a cpu
   * Only one cpu will be accessing a cpu's fault map.
   *
   * This function scans the cpu-private fault map & clears all bits that
   * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
   * the GRU, atomic operations must be used to clear bits.
   */
  static void get_clear_fault_map(struct gru_state *gru,
  				struct gru_tlb_fault_map *imap,
  				struct gru_tlb_fault_map *dmap)
  {
  	unsigned long i, k;
  	struct gru_tlb_fault_map *tfm;
  
  	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
  	prefetchw(tfm);		/* Helps on hardware, required for emulator */
  	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
  		k = tfm->fault_bits[i];
  		if (k)
  			k = xchg(&tfm->fault_bits[i], 0UL);
  		imap->fault_bits[i] = k;
  		k = tfm->done_bits[i];
  		if (k)
  			k = xchg(&tfm->done_bits[i], 0UL);
  		dmap->fault_bits[i] = k;
  	}
  
  	/*
  	 * Not functionally required but helps performance. (Required
  	 * on emulator)
  	 */
  	gru_flush_cache(tfm);
  }
  
  /*
   * Atomic (interrupt context) & non-atomic (user context) functions to
   * convert a vaddr into a physical address. The size of the page
   * is returned in pageshift.
   * 	returns:
   * 		  0 - successful
   * 		< 0 - error code
   * 		  1 - (atomic only) try again in non-atomic context
   */
  static int non_atomic_pte_lookup(struct vm_area_struct *vma,
  				 unsigned long vaddr, int write,
  				 unsigned long *paddr, int *pageshift)
  {
  	struct page *page;
  #ifdef CONFIG_HUGETLB_PAGE
  	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
  #else
  	*pageshift = PAGE_SHIFT;
  #endif
  	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
  		return -EFAULT;
  	*paddr = page_to_phys(page);
  	put_page(page);
  	return 0;
  }
  
  /*
   * atomic_pte_lookup
   *
   * Convert a user virtual address to a physical address
   * Only supports Intel large pages (2MB only) on x86_64.
   *	ZZZ - hugepage support is incomplete
   *
   * NOTE: mmap_sem is already held on entry to this function. This
   * guarantees existence of the page tables.
   */
  static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
  	int write, unsigned long *paddr, int *pageshift)
  {
  	pgd_t *pgdp;
  	p4d_t *p4dp;
  	pud_t *pudp;
  	pmd_t *pmdp;
  	pte_t pte;
  	pgdp = pgd_offset(vma->vm_mm, vaddr);
  	if (unlikely(pgd_none(*pgdp)))
  		goto err;
  	p4dp = p4d_offset(pgdp, vaddr);
  	if (unlikely(p4d_none(*p4dp)))
  		goto err;
  
  	pudp = pud_offset(p4dp, vaddr);
  	if (unlikely(pud_none(*pudp)))
  		goto err;
  
  	pmdp = pmd_offset(pudp, vaddr);
  	if (unlikely(pmd_none(*pmdp)))
  		goto err;
  #ifdef CONFIG_X86_64
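	/* A large pmd maps a 2MB page; the pmd entry itself is the leaf pte */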
  	if (unlikely(pmd_large(*pmdp)))
  		pte = *(pte_t *) pmdp;
  	else
  #endif
  		pte = *pte_offset_kernel(pmdp, vaddr);
  	if (unlikely(!pte_present(pte) ||
  		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
  		return 1;
  
  	*paddr = pte_pfn(pte) << PAGE_SHIFT;
  #ifdef CONFIG_HUGETLB_PAGE
  	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
  #else
  	*pageshift = PAGE_SHIFT;
  #endif
  	return 0;
  
  err:
  	return 1;
  }
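
/*
 * Convert a user virtual address in the faulting context to a GRU global
 * physical address (gpa) for a TLB dropin. Returns VTOP_SUCCESS on success,
 * VTOP_INVALID if the vaddr has no usable mapping, or VTOP_RETRY if the
 * lookup must be retried in non-atomic context.
 */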
  static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
  		    int write, int atomic, unsigned long *gpa, int *pageshift)
  {
  	struct mm_struct *mm = gts->ts_mm;
  	struct vm_area_struct *vma;
  	unsigned long paddr;
  	int ret, ps;
  
  	vma = find_vma(mm, vaddr);
  	if (!vma)
  		goto inval;
  
  	/*
  	 * Atomic lookup is faster & usually works even if called in non-atomic
  	 * context.
  	 */
	rmb();	/* Must check ms_range_active before loading PTEs */
  	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
  	if (ret) {
  		if (atomic)
  			goto upm;
  		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
  			goto inval;
  	}
  	if (is_gru_paddr(paddr))
  		goto inval;
  	paddr = paddr & ~((1UL << ps) - 1);
  	*gpa = uv_soc_phys_ram_to_gpa(paddr);
  	*pageshift = ps;
  	return VTOP_SUCCESS;
  
  inval:
  	return VTOP_INVALID;
  upm:
  	return VTOP_RETRY;
  }
  /*
   * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
   * CBE cacheline so that the line will be written back to home agent.
   * Otherwise the line may be silently dropped. This has no impact
   * except on performance.
   */
  static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
  {
  	if (unlikely(cbe)) {
  		cbe->cbrexecstatus = 0;         /* make CL dirty */
  		gru_flush_cache(cbe);
  	}
  }
  
  /*
   * Preload the TLB with entries that may be required. Currently, preloading
   * is implemented only for BCOPY. Preload  <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
   */
  static void gru_preload_tlb(struct gru_state *gru,
  			struct gru_thread_state *gts, int atomic,
  			unsigned long fault_vaddr, int asid, int write,
  			unsigned char tlb_preload_count,
  			struct gru_tlb_fault_handle *tfh,
  			struct gru_control_block_extended *cbe)
  {
  	unsigned long vaddr = 0, gpa;
  	int ret, pageshift;
  
  	if (cbe->opccpy != OP_BCOPY)
  		return;
  
  	if (fault_vaddr == cbe->cbe_baddr0)
  		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
  	else if (fault_vaddr == cbe->cbe_baddr1)
  		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;
  
  	fault_vaddr &= PAGE_MASK;
  	vaddr &= PAGE_MASK;
  	vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
  
  	while (vaddr > fault_vaddr) {
  		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
  		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
  					  GRU_PAGESIZE(pageshift)))
  			return;
  		gru_dbg(grudev,
  			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx
  ",
  			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
  			vaddr, asid, write, pageshift, gpa);
  		vaddr -= PAGE_SIZE;
  		STAT(tlb_preload_page);
  	}
  }
  
  /*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
   *	Input:
   *		cb    Address of user CBR. Null if not running in user context
   * 	Return:
   * 		  0 = dropin, exception, or switch to UPM successful
   * 		  1 = range invalidate active
   * 		< 0 = error code
   *
   */
  static int gru_try_dropin(struct gru_state *gru,
  			  struct gru_thread_state *gts,
  			  struct gru_tlb_fault_handle *tfh,
  			  struct gru_instruction_bits *cbk)
  {
  	struct gru_control_block_extended *cbe = NULL;
  	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
  	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
  	unsigned long gpa = 0, vaddr = 0;
  
  	/*
  	 * NOTE: The GRU contains magic hardware that eliminates races between
  	 * TLB invalidates and TLB dropins. If an invalidate occurs
  	 * in the window between reading the TFH and the subsequent TLB dropin,
  	 * the dropin is ignored. This eliminates the need for additional locks.
  	 */
  
  	/*
  	 * Prefetch the CBE if doing TLB preloading
  	 */
  	if (unlikely(tlb_preload_count)) {
  		cbe = gru_tfh_to_cbe(tfh);
  		prefetchw(cbe);
  	}
  
  	/*
	 * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM call.
  	 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
  	 * is a transient state.
  	 */
  	if (tfh->status != TFHSTATUS_EXCEPTION) {
  		gru_flush_cache(tfh);
  		sync_core();
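		/* Re-check after flushing the possibly stale TFH cacheline */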
  		if (tfh->status != TFHSTATUS_EXCEPTION)
  			goto failnoexception;
  		STAT(tfh_stale_on_fault);
  	}
  	if (tfh->state == TFHSTATE_IDLE)
  		goto failidle;
  	if (tfh->state == TFHSTATE_MISS_FMM && cbk)
  		goto failfmm;
  
  	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
  	vaddr = tfh->missvaddr;
  	asid = tfh->missasid;
  	indexway = tfh->indexway;
  	if (asid == 0)
  		goto failnoasid;
  
  	rmb();	/* TFH must be cache resident before reading ms_range_active */
  
  	/*
  	 * TFH is cache resident - at least briefly. Fail the dropin
  	 * if a range invalidate is active.
  	 */
  	if (atomic_read(&gts->ts_gms->ms_range_active))
  		goto failactive;
  	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
  	if (ret == VTOP_INVALID)
  		goto failinval;
  	if (ret == VTOP_RETRY)
  		goto failupm;

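	/*
	 * New page size for this context: record it and update the CCH.
	 * If the CCH cannot be updated here, force a reload and retry
	 * via the UPM path.
	 */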
  	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
  		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
  		if (atomic || !gru_update_cch(gts)) {
  			gts->ts_force_cch_reload = 1;
  			goto failupm;
  		}
  	}
  
  	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
  		gru_preload_tlb(gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
  		gru_flush_cache_cbe(cbe);
  	}
  	gru_cb_set_istatus_active(cbk);
  	gts->ustats.tlbdropin++;
  	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
  			  GRU_PAGESIZE(pageshift));
  	gru_dbg(grudev,
  		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
  		" rw %d, ps %d, gpa 0x%lx
  ",
  		atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
  		indexway, write, pageshift, gpa);
  	STAT(tlb_dropin);
  	return 0;
  
  failnoasid:
  	/* No asid (delayed unload). */
  	STAT(tlb_dropin_fail_no_asid);
  	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx
  ", tfh, vaddr);
  	if (!cbk)
  		tfh_user_polling_mode(tfh);
  	else
  		gru_flush_cache(tfh);
  	gru_flush_cache_cbe(cbe);
  	return -EAGAIN;
  
  failupm:
  	/* Atomic failure switch CBR to UPM */
  	tfh_user_polling_mode(tfh);
  	gru_flush_cache_cbe(cbe);
  	STAT(tlb_dropin_fail_upm);
  	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx
  ", tfh, vaddr);
  	return 1;
  
  failfmm:
  	/* FMM state on UPM call */
  	gru_flush_cache(tfh);
  	gru_flush_cache_cbe(cbe);
  	STAT(tlb_dropin_fail_fmm);
  	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d
  ", tfh, tfh->state);
  	return 0;
  failnoexception:
  	/* TFH status did not show exception pending */
  	gru_flush_cache(tfh);
  	gru_flush_cache_cbe(cbe);
  	if (cbk)
  		gru_flush_cache(cbk);
  	STAT(tlb_dropin_fail_no_exception);
  	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d
  ",
  		tfh, tfh->status, tfh->state);
  	return 0;
  failidle:
  	/* TFH state was idle  - no miss pending */
  	gru_flush_cache(tfh);
  	gru_flush_cache_cbe(cbe);
  	if (cbk)
  		gru_flush_cache(cbk);
  	STAT(tlb_dropin_fail_idle);
  	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d
  ", tfh, tfh->state);
  	return 0;
  
  failinval:
  	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
  	tfh_exception(tfh);
  	gru_flush_cache_cbe(cbe);
  	STAT(tlb_dropin_fail_invalid);
  	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx
  ", tfh, vaddr);
  	return -EFAULT;
  
  failactive:
  	/* Range invalidate active. Switch to UPM iff atomic */
  	if (!cbk)
  		tfh_user_polling_mode(tfh);
  	else
  		gru_flush_cache(tfh);
  	gru_flush_cache_cbe(cbe);
  	STAT(tlb_dropin_fail_range_active);
  	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx
  ",
  		tfh, vaddr);
  	return 1;
  }
  
  /*
   * Process an external interrupt from the GRU. This interrupt is
   * caused by a TLB miss.
   * Note that this is the interrupt handler that is registered with linux
   * interrupt handlers.
   */
  static irqreturn_t gru_intr(int chiplet, int blade)
  {
  	struct gru_state *gru;
  	struct gru_tlb_fault_map imap, dmap;
  	struct gru_thread_state *gts;
  	struct gru_tlb_fault_handle *tfh = NULL;
  	struct completion *cmp;
  	int cbrnum, ctxnum;
  
  	STAT(intr);
  	gru = &gru_base[blade]->bs_grus[chiplet];
  	if (!gru) {
  		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d
  ",
  			raw_smp_processor_id(), chiplet);
  		return IRQ_NONE;
  	}
  	get_clear_fault_map(gru, &imap, &dmap);
  	gru_dbg(grudev,
  		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx
  ",
  		smp_processor_id(), chiplet, gru->gs_gid,
  		imap.fault_bits[0], imap.fault_bits[1],
  		dmap.fault_bits[0], dmap.fault_bits[1]);
  
  	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
  		STAT(intr_cbr);
  		cmp = gru->gs_blade->bs_async_wq;
  		if (cmp)
  			complete(cmp);
  		gru_dbg(grudev, "gid %d, cbr_done %d, done %d
  ",
  			gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
  	}

  	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
  		STAT(intr_tfh);
  		tfh = get_tfh_by_index(gru, cbrnum);
  		prefetchw(tfh);	/* Helps on hdw, required for emulator */
  
  		/*
  		 * When hardware sets a bit in the faultmap, it implicitly
  		 * locks the GRU context so that it cannot be unloaded.
  		 * The gts cannot change until a TFH start/writestart command
  		 * is issued.
  		 */
  		ctxnum = tfh->ctxnum;
  		gts = gru->gs_gts[ctxnum];
  		/* Spurious interrupts can cause this. Ignore. */
  		if (!gts) {
  			STAT(intr_spurious);
  			continue;
  		}
  		/*
  		 * This is running in interrupt context. Trylock the mmap_sem.
  		 * If it fails, retry the fault in user context.
  		 */
  		gts->ustats.fmm_tlbmiss++;
  		if (!gts->ts_force_cch_reload &&
  					down_read_trylock(&gts->ts_mm->mmap_sem)) {
  			gru_try_dropin(gru, gts, tfh, NULL);
  			up_read(&gts->ts_mm->mmap_sem);
  		} else {
  			tfh_user_polling_mode(tfh);
  			STAT(intr_mm_lock_failed);
  		}
  	}
  	return IRQ_HANDLED;
  }
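
/*
 * Interrupt entry points: one per GRU chiplet on the local blade, plus a
 * handler that services GRU chiplets on blades that have no cpus.
 */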
  irqreturn_t gru0_intr(int irq, void *dev_id)
  {
  	return gru_intr(0, uv_numa_blade_id());
  }
  
  irqreturn_t gru1_intr(int irq, void *dev_id)
  {
  	return gru_intr(1, uv_numa_blade_id());
  }
  
  irqreturn_t gru_intr_mblade(int irq, void *dev_id)
  {
  	int blade;
  
  	for_each_possible_blade(blade) {
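		/*
		 * Blades with cpus are serviced by the local gru0_intr/gru1_intr
		 * handlers; only poll blades that have no cpus here.
		 */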
  		if (uv_blade_nr_possible_cpus(blade))
  			continue;
  		gru_intr(0, blade);
  		gru_intr(1, blade);
  	}
  	return IRQ_HANDLED;
  }
  
  static int gru_user_dropin(struct gru_thread_state *gts,
  			   struct gru_tlb_fault_handle *tfh,
  			   void *cb)
  {
  	struct gru_mm_struct *gms = gts->ts_gms;
  	int ret;
  	gts->ustats.upm_tlbmiss++;
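
	/*
	 * Wait for any active range invalidate to finish, then attempt the
	 * dropin. gru_try_dropin() returns > 0 when the attempt must be
	 * retried; <= 0 is final (success or error).
	 */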
  	while (1) {
  		wait_event(gms->ms_wait_queue,
  			   atomic_read(&gms->ms_range_active) == 0);
  		prefetchw(tfh);	/* Helps on hdw, required for emulator */
  		ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
  		if (ret <= 0)
  			return ret;
  		STAT(call_os_wait_queue);
  	}
  }
  
  /*
   * This interface is called as a result of a user detecting a "call OS" bit
   * in a user CB. Normally means that a TLB fault has occurred.
   * 	cb - user virtual address of the CB
   */
  int gru_handle_user_call_os(unsigned long cb)
  {
  	struct gru_tlb_fault_handle *tfh;
  	struct gru_thread_state *gts;
  	void *cbk;
  	int ucbnum, cbrnum, ret = -EINVAL;
  
  	STAT(call_os);
  
  	/* sanity check the cb pointer */
  	ucbnum = get_cb_number((void *)cb);
  	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
  		return -EINVAL;
  
  	gts = gru_find_lock_gts(cb);
  	if (!gts)
  		return -EINVAL;
  	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p
  ", cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

  	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
  		goto exit;

  	gru_check_context_placement(gts);

  	/*
  	 * CCH may contain stale data if ts_force_cch_reload is set.
  	 */
  	if (gts->ts_gru && gts->ts_force_cch_reload) {
  		gts->ts_force_cch_reload = 0;
  		gru_update_cch(gts);
  	}
  	ret = -EAGAIN;
  	cbrnum = thread_cbr_number(gts, ucbnum);
  	if (gts->ts_gru) {
  		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
  		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
  				gts->ts_ctxnum, ucbnum);
  		ret = gru_user_dropin(gts, tfh, cbk);
  	}
  exit:
  	gru_unlock_gts(gts);
  	return ret;
  }
  
  /*
   * Fetch the exception detail information for a CB that terminated with
   * an exception.
   */
  int gru_get_exception_detail(unsigned long arg)
  {
  	struct control_block_extended_exc_detail excdet;
  	struct gru_control_block_extended *cbe;
  	struct gru_thread_state *gts;
  	int ucbnum, cbrnum, ret;
  
  	STAT(user_exception);
  	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
  		return -EFAULT;
  	gts = gru_find_lock_gts(excdet.cb);
  	if (!gts)
  		return -EINVAL;
  	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p
  ", excdet.cb, gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
  	ucbnum = get_cb_number((void *)excdet.cb);
  	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
  		ret = -EINVAL;
  	} else if (gts->ts_gru) {
  		cbrnum = thread_cbr_number(gts, ucbnum);
  		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
  		gru_flush_cache(cbe);	/* CBE not coherent */
		sync_core();		/* make sure we have current data */
  		excdet.opc = cbe->opccpy;
  		excdet.exopc = cbe->exopccpy;
  		excdet.ecause = cbe->ecause;
  		excdet.exceptdet0 = cbe->idef1upd;
  		excdet.exceptdet1 = cbe->idef3upd;
  		excdet.cbrstate = cbe->cbrstate;
  		excdet.cbrexecstatus = cbe->cbrexecstatus;
  		gru_flush_cache_cbe(cbe);
  		ret = 0;
  	} else {
  		ret = -EAGAIN;
  	}
  	gru_unlock_gts(gts);
  	gru_dbg(grudev,
  		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
  		"exdet0 0x%lx, exdet1 0x%x
  ",
  		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
  		excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
  	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
  		ret = -EFAULT;
  	return ret;
  }
  
  /*
   * User request to unload a context. Content is saved for possible reload.
   */
  static int gru_unload_all_contexts(void)
  {
  	struct gru_thread_state *gts;
  	struct gru_state *gru;
  	int gid, ctxnum;
  
  	if (!capable(CAP_SYS_ADMIN))
  		return -EPERM;
  	foreach_gid(gid) {
  		gru = GID_TO_GRU(gid);
  		spin_lock(&gru->gs_lock);
  		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
  			gts = gru->gs_gts[ctxnum];
  			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
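				/*
				 * gs_lock cannot be held across gru_unload_context();
				 * drop it and reacquire afterwards.
				 */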
  				spin_unlock(&gru->gs_lock);
  				gru_unload_context(gts, 1);
  				mutex_unlock(&gts->ts_ctxlock);
  				spin_lock(&gru->gs_lock);
  			}
  		}
  		spin_unlock(&gru->gs_lock);
  	}
  	return 0;
  }
  int gru_user_unload_context(unsigned long arg)
  {
  	struct gru_thread_state *gts;
  	struct gru_unload_context_req req;
  
  	STAT(user_unload_context);
  	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
  		return -EFAULT;
  
  	gru_dbg(grudev, "gseg 0x%lx
  ", req.gseg);
  	if (!req.gseg)
  		return gru_unload_all_contexts();
  	gts = gru_find_lock_gts(req.gseg);
  	if (!gts)
  		return -EINVAL;
  
  	if (gts->ts_gru)
  		gru_unload_context(gts, 1);
  	gru_unlock_gts(gts);
  
  	return 0;
  }
  
  /*
   * User request to flush a range of virtual addresses from the GRU TLB
   * (Mainly for testing).
   */
  int gru_user_flush_tlb(unsigned long arg)
  {
  	struct gru_thread_state *gts;
  	struct gru_flush_tlb_req req;
  	struct gru_mm_struct *gms;
  
  	STAT(user_flush_tlb);
  	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
  		return -EFAULT;
  
  	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx
  ", req.gseg,
  		req.vaddr, req.len);
  
  	gts = gru_find_lock_gts(req.gseg);
  	if (!gts)
  		return -EINVAL;
  	gms = gts->ts_gms;
  	gru_unlock_gts(gts);
  	gru_flush_tlb_range(gms, req.vaddr, req.len);
  
  	return 0;
  }
  
  /*
 * Fetch GSEG statistics
   */
  long gru_get_gseg_statistics(unsigned long arg)
  {
  	struct gru_thread_state *gts;
  	struct gru_get_gseg_statistics_req req;
  
  	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
  		return -EFAULT;
  	/*
  	 * The library creates arrays of contexts for threaded programs.
  	 * If no gts exists in the array, the context has never been used & all
  	 * statistics are implicitly 0.
  	 */
  	gts = gru_find_lock_gts(req.gseg);
  	if (gts) {
  		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
  		gru_unlock_gts(gts);
  	} else {
  		memset(&req.stats, 0, sizeof(gts->ustats));
  	}
  
  	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
  		return -EFAULT;
  
  	return 0;
  }
  
  /*
   * Register the current task as the user of the GSEG slice.
   * Needed for TLB fault interrupt targeting.
   */
  int gru_set_context_option(unsigned long arg)
  {
  	struct gru_thread_state *gts;
  	struct gru_set_context_option_req req;
  	int ret = 0;

  	STAT(set_context_option);
  	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
  		return -EFAULT;
  	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx
  ", req.op, req.gseg, req.val1);
  	gts = gru_find_lock_gts(req.gseg);
  	if (!gts) {
  		gts = gru_alloc_locked_gts(req.gseg);
  		if (IS_ERR(gts))
  			return PTR_ERR(gts);
  	}

  	switch (req.op) {
  	case sco_blade_chiplet:
  		/* Select blade/chiplet for GRU context */
  		if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
  		    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
  		    (req.val1 >= 0 && !gru_base[req.val1])) {
  			ret = -EINVAL;
  		} else {
  			gts->ts_user_blade_id = req.val1;
  			gts->ts_user_chiplet_id = req.val0;
  			gru_check_context_placement(gts);
  		}
  		break;
  	case sco_gseg_owner:
   		/* Register the current task as the GSEG owner */
  		gts->ts_tgid_owner = current->tgid;
  		break;
  	case sco_cch_req_slice:
   		/* Set the CCH slice option */
  		gts->ts_cch_req_slice = req.val1 & 3;
  		break;
  	default:
  		ret = -EINVAL;
  	}
  	gru_unlock_gts(gts);
  	return ret;
  }