Blame view

include/linux/pagewalk.h 4.06 KB
a520110e4   Christoph Hellwig   mm: split out a n...
1
2
3
4
5
  /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_PAGEWALK_H
  #define _LINUX_PAGEWALK_H
  
  #include <linux/mm.h>
7b86ac337   Christoph Hellwig   pagewalk: separat...
6
  struct mm_walk;
a520110e4   Christoph Hellwig   mm: split out a n...
7
/**
 * struct mm_walk_ops - callbacks for walk_page_range
 * @pgd_entry:		if set, called for each non-empty PGD (top-level) entry
 * @p4d_entry:		if set, called for each non-empty P4D entry
 * @pud_entry:		if set, called for each non-empty PUD entry
 * @pmd_entry:		if set, called for each non-empty PMD entry
 *			this handler is required to be able to handle
 *			pmd_trans_huge() pmds.  They may simply choose to
 *			split_huge_page() instead of handling it explicitly.
 * @pte_entry:		if set, called for each non-empty PTE (lowest-level)
 *			entry
 * @pte_hole:		if set, called for each hole at all levels,
 *			depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD
 *			4:PTE. Any folded depths (where PTRS_PER_P?D is equal
 *			to 1) are skipped.
 * @hugetlb_entry:	if set, called for each hugetlb entry
 * @test_walk:		caller specific callback function to determine whether
 *			we walk over the current vma or not. Returning 0 means
 *			"do page table walk over the current vma", returning
 *			a negative value means "abort current page table walk
 *			right now" and returning 1 means "skip the current vma"
 * @pre_vma:            if set, called before starting walk on a non-null vma.
 * @post_vma:           if set, called after a walk on a non-null vma, provided
 *                      that @pre_vma and the vma walk succeeded.
 *
 * p?d_entry callbacks are called even if those levels are folded on a
 * particular architecture/configuration.
 */
struct mm_walk_ops {
	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*p4d_entry)(p4d_t *p4d, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pud_entry)(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			int depth, struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*pre_vma)(unsigned long start, unsigned long end,
		       struct mm_walk *walk);
	void (*post_vma)(struct mm_walk *walk);
};
3afc42363   Steven Price   mm: pagewalk: add...
57
58
59
60
61
62
63
64
65
66
67
68
  /*
   * Action for pud_entry / pmd_entry callbacks.
   * ACTION_SUBTREE is the default
   */
  enum page_walk_action {
  	/* Descend to next level, splitting huge pages if needed and possible */
  	ACTION_SUBTREE = 0,
  	/* Continue to next entry at this level (ignoring any subtree) */
  	ACTION_CONTINUE = 1,
  	/* Call again for this entry */
  	ACTION_AGAIN = 2
  };
7b86ac337   Christoph Hellwig   pagewalk: separat...
69
70
71
72
/**
 * mm_walk - walk_page_range data
 * @ops:	operation to call during the walk
 * @mm:		mm_struct representing the target process of page table walk
 * @pgd:	pointer to PGD; only valid with no_vma (otherwise set to NULL)
 * @vma:	vma currently walked (NULL if walking outside vmas)
 * @action:	next action to perform (see enum page_walk_action)
 * @no_vma:	walk ignoring vmas (vma will always be NULL)
 * @private:	private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	const struct mm_walk_ops *ops;
	struct mm_struct *mm;
	pgd_t *pgd;
	struct vm_area_struct *vma;
	enum page_walk_action action;
	bool no_vma;
	void *private;
};
7b86ac337   Christoph Hellwig   pagewalk: separat...
90
91
92
/* Walk the page tables of @mm over [start, end), invoking @ops callbacks. */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private);
/*
 * As walk_page_range() but ignoring vmas; @pgd, if non-NULL, supplies the
 * top-level table to walk instead of the mm's own (see struct mm_walk).
 */
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd,
			  void *private);
/* Walk the page tables covering a single vma. */
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		void *private);
/*
 * Walk all vmas of @mapping that overlap the @nr pages starting at
 * @first_index.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private);
a520110e4   Christoph Hellwig   mm: split out a n...
102
103
  
  #endif /* _LINUX_PAGEWALK_H */