tools/virtio/ringtest/main.h
  /* SPDX-License-Identifier: GPL-2.0-only */
  /*
   * Copyright (C) 2016 Red Hat, Inc.
   * Author: Michael S. Tsirkin <mst@redhat.com>
   *
   * Common macros and functions for ring benchmarking.
   */
  #ifndef MAIN_H
  #define MAIN_H
  
  #include <stdbool.h>
  extern int param;
  extern bool do_exit;
  
  #if defined(__x86_64__) || defined(__i386__)
  #include "x86intrin.h"
  
  static inline void wait_cycles(unsigned long long cycles)
  {
  	unsigned long long t;
  
  	t = __rdtsc();
  	while (__rdtsc() - t < cycles) {}
  }
  
/* rough estimates of VM exit/entry cost, in TSC cycles */
#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500
  #elif defined(__s390x__)
  static inline void wait_cycles(unsigned long long cycles)
  {
  	asm volatile("0: brctg %0,0b" : : "d" (cycles));
  }
  
  /* tweak me */
  #define VMEXIT_CYCLES 200
  #define VMENTRY_CYCLES 200
  #else
  static inline void wait_cycles(unsigned long long cycles)
  {
  	_Exit(5);
  }
  #define VMEXIT_CYCLES 0
  #define VMENTRY_CYCLES 0
  #endif
  
/*
 * Simulate the cost of a VM exit/entry by spinning for the configured
 * number of cycles; these are no-ops unless do_exit is set.
 */
static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}
static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}
  
  /* implemented by ring */
  void alloc_ring(void);
  /* guest side */
  int add_inbuf(unsigned, void *, void *);
  void *get_buf(unsigned *, void **);
  void disable_call();
  bool used_empty();
  bool enable_call();
  void kick_available();
  /* host side */
  void disable_kick();
  bool avail_empty();
  bool enable_kick();
  bool use_buf(unsigned *, void **);
  void call_used();
  
  /* implemented by main */
  extern bool do_sleep;
  void kick(void);
  void wait_for_kick(void);
  void call(void);
  void wait_for_call(void);
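
/*
 * Rough sketch of how the benchmark in main.c is expected to drive this
 * API (illustrative only - see main.c for the real loops):
 *
 * guest: add_inbuf() until the ring fills up, kick_available() to notify
 *        the host, reap completions with get_buf(); to sleep instead of
 *        polling, enable_call() and, while used_empty() still holds,
 *        wait_for_call().
 *
 * host:  drain buffers with use_buf() and notify the guest with
 *        call_used(); to sleep instead of polling, enable_kick() and,
 *        while avail_empty() still holds, wait_for_kick().
 */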
  
  extern unsigned ring_size;
  
  /* Compiler barrier - similar to what Linux uses */
  #define barrier() asm volatile("" ::: "memory")
  
  /* Is there a portable way to do this? */
  #if defined(__x86_64__) || defined(__i386__)
  #define cpu_relax() asm ("rep; nop" ::: "memory")
  #elif defined(__s390x__)
  #define cpu_relax() barrier()
  #else
  #define cpu_relax() assert(0)
  #endif
  
  extern bool do_relax;
  
  static inline void busy_wait(void)
  {
  	if (do_relax)
  		cpu_relax();
  	else
  		/* prevent compiler from removing busy loops */
  		barrier();
}
  #if defined(__x86_64__) || defined(__i386__)
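/*
 * A locked read-modify-write to the stack orders all earlier loads and
 * stores against later ones, like mfence but typically cheaper; the -132
 * offset presumably keeps the dummy store clear of the 128-byte red zone.
 */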
  #define smp_mb()     asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
  #else
  /*
   * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
   * with other __ATOMIC_SEQ_CST calls.
   */
  #define smp_mb() __sync_synchronize()
  #endif
  
  /*
   * This abuses the atomic builtins for thread fences, and
   * adds a compiler barrier.
   */
  #define smp_release() do { \
      barrier(); \
      __atomic_thread_fence(__ATOMIC_RELEASE); \
  } while (0)
  
  #define smp_acquire() do { \
      __atomic_thread_fence(__ATOMIC_ACQUIRE); \
      barrier(); \
  } while (0)
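
/*
 * Typical pairing (illustrative): a producer fills in a ring entry, then
 * smp_release() before publishing the new index; a consumer reads the
 * index, then smp_acquire() before dereferencing the entry it guards.
 */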
  #if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
  #define smp_wmb() barrier()
  #else
  #define smp_wmb() smp_release()
  #endif
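/*
 * x86 and s390 keep stores ordered against earlier stores, which is why
 * smp_wmb() can be a plain compiler barrier there.
 */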
  
  #ifdef __alpha__
  #define smp_read_barrier_depends() smp_acquire()
  #else
  #define smp_read_barrier_depends() do {} while(0)
  #endif
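/* Only Alpha is known to reorder dependent loads, hence the no-op elsewhere. */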
  
static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
	case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
	case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
	case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}
  
  static __always_inline void __write_once_size(volatile void *p, void *res, int size)
  {
  	switch (size) {
  	case 1: *(volatile unsigned char *)p = *(unsigned char *)res; break;
  	case 2: *(volatile unsigned short *)p = *(unsigned short *)res; break;
  	case 4: *(volatile unsigned int *)p = *(unsigned int *)res; break;
  	case 8: *(volatile unsigned long long *)p = *(unsigned long long *)res; break;
  	default:
  		barrier();
  		__builtin_memcpy((void *)p, (const void *)res, size);
  		barrier();
  	}
  }
  
  #define READ_ONCE(x) \
  ({									\
  	union { typeof(x) __val; char __c[1]; } __u;			\
  	__read_once_size(&(x), __u.__c, sizeof(x));		\
  	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
  	__u.__val;							\
  })
  
  #define WRITE_ONCE(x, val) \
  ({							\
  	union { typeof(x) __val; char __c[1]; } __u =	\
  		{ .__val = (typeof(x)) (val) }; \
  	__write_once_size(&(x), __u.__c, sizeof(x));	\
  	__u.__val;					\
  })
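
/*
 * Illustrative use (hypothetical field names, not from this file): publish
 * a ring index with WRITE_ONCE() and poll it with READ_ONCE() so the
 * compiler can neither tear nor cache the shared accesses:
 *
 *	WRITE_ONCE(ring->avail_idx, idx);		// producer
 *	while (READ_ONCE(ring->used_idx) != idx)	// consumer
 *		busy_wait();
 */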
  #endif