lib/list_sort.c

  // SPDX-License-Identifier: GPL-2.0
  #include <linux/kernel.h>
  #include <linux/bug.h>
  #include <linux/compiler.h>
  #include <linux/export.h>
  #include <linux/string.h>
  #include <linux/list_sort.h>
#include <linux/list.h>

#define MAX_LIST_LENGTH_BITS 20
  
  /*
   * Returns a list organized in an intermediate format suited
   * to chaining of merge() calls: null-terminated, no reserved or
   * sentinel head node, "prev" links not maintained.
   */
  static struct list_head *merge(void *priv,
  				int (*cmp)(void *priv, struct list_head *a,
  					struct list_head *b),
  				struct list_head *a, struct list_head *b)
  {
  	struct list_head head, *tail = &head;
  
  	while (a && b) {
  		/* if equal, take 'a' -- important for sort stability */
  		if ((*cmp)(priv, a, b) <= 0) {
  			tail->next = a;
  			a = a->next;
  		} else {
  			tail->next = b;
  			b = b->next;
  		}
  		tail = tail->next;
  	}
  	tail->next = a?:b;
  	return head.next;
  }
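
/*
 * Illustrative sketch (not part of the original file): the "intermediate
 * format" above is simply a NULL-terminated, singly linked chain.  Assuming a
 * cmp() that sorts ascending, merging a: 1 -> 3 -> NULL with b: 2 -> 4 -> NULL
 * yields 1 -> 2 -> 3 -> 4 -> NULL, touching only ->next; ->prev stays stale
 * until merge_and_restore_back_links() runs once at the very end.  list_sort()
 * below creates such chains by cutting the circular list:
 *
 *	head->prev->next = NULL;	-- circular list becomes a chain
 *	cur->next = NULL;		-- each detached node is a one-element chain
 */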
  
  /*
   * Combine final list merge with restoration of standard doubly-linked
   * list structure.  This approach duplicates code from merge(), but
   * runs faster than the tidier alternatives of either a separate final
   * prev-link restoration pass, or maintaining the prev links
   * throughout.
   */
  static void merge_and_restore_back_links(void *priv,
  				int (*cmp)(void *priv, struct list_head *a,
  					struct list_head *b),
  				struct list_head *head,
  				struct list_head *a, struct list_head *b)
  {
  	struct list_head *tail = head;
  	u8 count = 0;
  
  	while (a && b) {
  		/* if equal, take 'a' -- important for sort stability */
  		if ((*cmp)(priv, a, b) <= 0) {
  			tail->next = a;
  			a->prev = tail;
  			a = a->next;
  		} else {
  			tail->next = b;
  			b->prev = tail;
  			b = b->next;
  		}
  		tail = tail->next;
  	}
  	tail->next = a ? : b;
  
  	do {
  		/*
  		 * In worst cases this loop may run many iterations.
  		 * Continue callbacks to the client even though no
  		 * element comparison is needed, so the client's cmp()
  		 * routine can invoke cond_resched() periodically.
  		 */
  		if (unlikely(!(++count)))
  			(*cmp)(priv, tail->next, tail->next);
  
  		tail->next->prev = tail;
  		tail = tail->next;
  	} while (tail->next);
  
  	tail->next = head;
  	head->prev = tail;
  }
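
/*
 * Illustrative sketch (not part of the original file): the unconditional
 * cmp() call above fires once every 256 spliced nodes (whenever the u8
 * counter wraps), purely so a client's comparison routine gets control
 * periodically on very long lists.  A hypothetical caller could use that to
 * yield the CPU.  struct my_obj and my_cmp below are made-up names,
 * cond_resched() needs <linux/sched.h>, and this is only appropriate when
 * list_sort() is called from a context that may sleep.
 */
struct my_obj {
	int key;
	struct list_head list;
};

static int my_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct my_obj *oa = container_of(a, struct my_obj, list);
	struct my_obj *ob = container_of(b, struct my_obj, list);

	cond_resched();		/* yields only if a reschedule is actually due */
	return (oa->key > ob->key) - (oa->key < ob->key);
}
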
  /**
   * list_sort - sort a list
   * @priv: private data, opaque to list_sort(), passed to @cmp
   * @head: the list to sort
   * @cmp: the elements comparison function
   *
   * This function implements "merge sort", which has O(nlog(n))
   * complexity.
   *
   * The comparison function @cmp must return a negative value if @a
   * should sort before @b, and a positive value if @a should sort after
   * @b. If @a and @b are equivalent, and their original relative
   * ordering is to be preserved, @cmp must return 0.
   */
  void list_sort(void *priv, struct list_head *head,
  		int (*cmp)(void *priv, struct list_head *a,
  			struct list_head *b))
  {
  	struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
  						-- last slot is a sentinel */
  	int lev;  /* index into part[] */
  	int max_lev = 0;
  	struct list_head *list;
  
  	if (list_empty(head))
  		return;
  	memset(part, 0, sizeof(part));
  
  	head->prev->next = NULL;
  	list = head->next;

  	while (list) {
  		struct list_head *cur = list;
  		list = list->next;
  		cur->next = NULL;
  
  		for (lev = 0; part[lev]; lev++) {
  			cur = merge(priv, cmp, part[lev], cur);
  			part[lev] = NULL;
  		}
  		if (lev > max_lev) {
  			if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
				printk_once(KERN_DEBUG "list too long for efficiency\n");
  				lev--;
  			}
  			max_lev = lev;
  		}
  		part[lev] = cur;
  	}

  	for (lev = 0; lev < max_lev; lev++)
  		if (part[lev])
  			list = merge(priv, cmp, part[lev], list);

  	merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
  }
  EXPORT_SYMBOL(list_sort);
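
/*
 * Illustrative usage sketch (not part of the original file).  struct foo,
 * foo_cmp() and sort_foos() are hypothetical names; only the list_sort()
 * call itself reflects the interface exported above.  A caller in its own
 * source file might look like this:
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_sort.h>

struct foo {
	int key;
	struct list_head node;
};

static int foo_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	const struct foo *fa = container_of(a, struct foo, node);
	const struct foo *fb = container_of(b, struct foo, node);

	/* negative/zero/positive as documented above; ties keep input order */
	if (fa->key < fb->key)
		return -1;
	if (fa->key > fb->key)
		return 1;
	return 0;
}

/* Sort an already-populated list of struct foo by ascending key. */
static void sort_foos(struct list_head *foo_list)
{
	list_sort(NULL, foo_list, foo_cmp);
}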