Commit 847088079162a5cf8ab0d1ad1ecf7fa60c057246

Authored by Jeremy Fitzhardinge
1 parent c796f213a6

xen/trace: add mmu tracepoints

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>

Showing 2 changed files with 247 additions and 2 deletions Side-by-side Diff

... ... @@ -48,6 +48,8 @@
48 48 #include <linux/memblock.h>
49 49 #include <linux/seq_file.h>
50 50  
  51 +#include <trace/events/xen.h>
  52 +
51 53 #include <asm/pgtable.h>
52 54 #include <asm/tlbflush.h>
53 55 #include <asm/fixmap.h>
... ... @@ -194,6 +196,8 @@
194 196 struct multicall_space mcs;
195 197 struct mmu_update *u;
196 198  
  199 + trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
  200 +
197 201 mcs = xen_mc_entry(sizeof(*u));
198 202 u = mcs.args;
199 203  
... ... @@ -245,6 +249,8 @@
245 249  
246 250 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
247 251 {
  252 + trace_xen_mmu_set_pmd(ptr, val);
  253 +
248 254 /* If page is not pinned, we can just update the entry
249 255 directly */
250 256 if (!xen_page_pinned(ptr)) {
251 257  
252 258  
253 259  
... ... @@ -282,22 +288,30 @@
282 288 return true;
283 289 }
284 290  
285   -static void xen_set_pte(pte_t *ptep, pte_t pteval)
  291 +static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
286 292 {
287 293 if (!xen_batched_set_pte(ptep, pteval))
288 294 native_set_pte(ptep, pteval);
289 295 }
290 296  
/* pvops set_pte hook: emit the tracepoint, then do the real update. */
static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}
  302 +
/*
 * pvops set_pte_at hook.  mm and addr are used only to enrich the
 * tracepoint; the actual pte write is identical to xen_set_pte().
 */
static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}
296 309  
/*
 * Begin a read-modify-write of a pte.  No snapshotting is needed:
 * the current value is returned as-is, and the commit side preserves
 * the hardware-updated bits (per the original comment below).
 */
pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}
303 317  
... ... @@ -306,6 +320,7 @@
306 320 {
307 321 struct mmu_update u;
308 322  
  323 + trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
309 324 xen_mc_batch();
310 325  
311 326 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
... ... @@ -530,6 +545,8 @@
530 545  
531 546 static void xen_set_pud(pud_t *ptr, pud_t val)
532 547 {
  548 + trace_xen_mmu_set_pud(ptr, val);
  549 +
533 550 /* If page is not pinned, we can just update the entry
534 551 directly */
535 552 if (!xen_page_pinned(ptr)) {
536 553  
537 554  
... ... @@ -543,17 +560,20 @@
543 560 #ifdef CONFIG_X86_PAE
/*
 * PAE-only pvops hook: store the full 64-bit pte with set_64bit() so
 * the entry is updated in a single atomic write.
 */
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}
548 566  
/*
 * Clear a pte: try the batched-hypercall path with a zero pte first,
 * falling back to the native clear when batching does not apply.
 */
static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}
554 573  
/* Clear a pmd by writing zero through set_pmd() (and its pvops path). */
static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
559 579 #endif /* CONFIG_X86_PAE */
... ... @@ -628,6 +648,8 @@
628 648 static void xen_set_pgd(pgd_t *ptr, pgd_t val)
629 649 {
630 650 pgd_t *user_ptr = xen_get_user_pgd(ptr);
  651 +
  652 + trace_xen_mmu_set_pgd(ptr, user_ptr, val);
631 653  
632 654 /* If page is not pinned, we can just update the entry
633 655 directly */
include/trace/events/xen.h
... ... @@ -125,6 +125,229 @@
125 125 __entry->res == XEN_MC_XE_BAD_OP ? "BAD_OP" :
126 126 __entry->res == XEN_MC_XE_NO_SPACE ? "NO_SPACE" : "???")
127 127 );
  128 +
  129 +/* mmu */
  130 +TRACE_EVENT(xen_mmu_set_pte,
  131 + TP_PROTO(pte_t *ptep, pte_t pteval),
  132 + TP_ARGS(ptep, pteval),
  133 + TP_STRUCT__entry(
  134 + __field(pte_t *, ptep)
  135 + __field(pteval_t, pteval)
  136 + ),
  137 + TP_fast_assign(__entry->ptep = ptep;
  138 + __entry->pteval = pteval.pte),
  139 + TP_printk("ptep %p pteval %0*llx (raw %0*llx)",
  140 + __entry->ptep,
  141 + (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
  142 + (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
  143 + );
  144 +
  145 +TRACE_EVENT(xen_mmu_set_pte_atomic,
  146 + TP_PROTO(pte_t *ptep, pte_t pteval),
  147 + TP_ARGS(ptep, pteval),
  148 + TP_STRUCT__entry(
  149 + __field(pte_t *, ptep)
  150 + __field(pteval_t, pteval)
  151 + ),
  152 + TP_fast_assign(__entry->ptep = ptep;
  153 + __entry->pteval = pteval.pte),
  154 + TP_printk("ptep %p pteval %0*llx (raw %0*llx)",
  155 + __entry->ptep,
  156 + (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
  157 + (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
  158 + );
  159 +
/*
 * pte write issued on behalf of a specific domain; domid identifies
 * the target domain the update is directed at (see xen_set_domain_pte).
 */
TRACE_EVENT(xen_mmu_set_domain_pte,
	    TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid),
	    TP_ARGS(ptep, pteval, domid),
	    TP_STRUCT__entry(
		    __field(pte_t *, ptep)
		    __field(pteval_t, pteval)
		    __field(unsigned, domid)
		    ),
	    TP_fast_assign(__entry->ptep = ptep;
			   __entry->pteval = pteval.pte;
			   __entry->domid = domid),
	    TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u",
		      __entry->ptep,
		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval,
		      __entry->domid)
	);
  177 +
/* Like xen_mmu_set_pte, but also records the mm and virtual address. */
TRACE_EVENT(xen_mmu_set_pte_at,
	    TP_PROTO(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pteval),
	    TP_ARGS(mm, addr, ptep, pteval),
	    TP_STRUCT__entry(
		    __field(struct mm_struct *, mm)
		    __field(unsigned long, addr)
		    __field(pte_t *, ptep)
		    __field(pteval_t, pteval)
		    ),
	    TP_fast_assign(__entry->mm = mm;
			   __entry->addr = addr;
			   __entry->ptep = ptep;
			   __entry->pteval = pteval.pte),
	    TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
		      __entry->mm, __entry->addr, __entry->ptep,
		      (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
		      (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
	);
  197 +
/* pte being cleared; only the identifying mm/addr/ptep are recorded. */
TRACE_EVENT(xen_mmu_pte_clear,
	    TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
	    TP_ARGS(mm, addr, ptep),
	    TP_STRUCT__entry(
		    __field(struct mm_struct *, mm)
		    __field(unsigned long, addr)
		    __field(pte_t *, ptep)
		    ),
	    TP_fast_assign(__entry->mm = mm;
			   __entry->addr = addr;
			   __entry->ptep = ptep),
	    TP_printk("mm %p addr %lx ptep %p",
		      __entry->mm, __entry->addr, __entry->ptep)
	);
  212 +
/* pmd write: logs both the decoded pfn/flags view and the raw value. */
TRACE_EVENT(xen_mmu_set_pmd,
	    TP_PROTO(pmd_t *pmdp, pmd_t pmdval),
	    TP_ARGS(pmdp, pmdval),
	    TP_STRUCT__entry(
		    __field(pmd_t *, pmdp)
		    __field(pmdval_t, pmdval)
		    ),
	    TP_fast_assign(__entry->pmdp = pmdp;
			   __entry->pmdval = pmdval.pmd),
	    TP_printk("pmdp %p pmdval %0*llx (raw %0*llx)",
		      __entry->pmdp,
		      (int)sizeof(pmdval_t) * 2, (unsigned long long)pmd_val(native_make_pmd(__entry->pmdval)),
		      (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
	);
  227 +
/* pmd being cleared; only the pointer is recorded. */
TRACE_EVENT(xen_mmu_pmd_clear,
	    TP_PROTO(pmd_t *pmdp),
	    TP_ARGS(pmdp),
	    TP_STRUCT__entry(
		    __field(pmd_t *, pmdp)
		    ),
	    TP_fast_assign(__entry->pmdp = pmdp),
	    TP_printk("pmdp %p", __entry->pmdp)
	);
  237 +
  238 +#if PAGETABLE_LEVELS >= 4
  239 +
/* 4-level variant: pud is a real page-table level, decode via pud_val(). */
TRACE_EVENT(xen_mmu_set_pud,
	    TP_PROTO(pud_t *pudp, pud_t pudval),
	    TP_ARGS(pudp, pudval),
	    TP_STRUCT__entry(
		    __field(pud_t *, pudp)
		    __field(pudval_t, pudval)
		    ),
	    TP_fast_assign(__entry->pudp = pudp;
			   __entry->pudval = native_pud_val(pudval)),
	    TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
		      __entry->pudp,
		      (int)sizeof(pudval_t) * 2, (unsigned long long)pud_val(native_make_pud(__entry->pudval)),
		      (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
	);
  254 +
/*
 * pgd write.  user_pgdp is the companion slot returned by
 * xen_get_user_pgd() at the call site -- NOTE(review): presumably the
 * user-mode pgd used by 64-bit Xen guests; confirm against mmu.c.
 */
TRACE_EVENT(xen_mmu_set_pgd,
	    TP_PROTO(pgd_t *pgdp, pgd_t *user_pgdp, pgd_t pgdval),
	    TP_ARGS(pgdp, user_pgdp, pgdval),
	    TP_STRUCT__entry(
		    __field(pgd_t *, pgdp)
		    __field(pgd_t *, user_pgdp)
		    __field(pgdval_t, pgdval)
		    ),
	    TP_fast_assign(__entry->pgdp = pgdp;
			   __entry->user_pgdp = user_pgdp;
			   __entry->pgdval = pgdval.pgd),
	    TP_printk("pgdp %p user_pgdp %p pgdval %0*llx (raw %0*llx)",
		      __entry->pgdp, __entry->user_pgdp,
		      (int)sizeof(pgdval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pgdval)),
		      (int)sizeof(pgdval_t) * 2, (unsigned long long)__entry->pgdval)
	);
  271 +
/* pud being cleared (4-level only); only the pointer is recorded. */
TRACE_EVENT(xen_mmu_pud_clear,
	    TP_PROTO(pud_t *pudp),
	    TP_ARGS(pudp),
	    TP_STRUCT__entry(
		    __field(pud_t *, pudp)
		    ),
	    TP_fast_assign(__entry->pudp = pudp),
	    TP_printk("pudp %p", __entry->pudp)
	);
  281 +#else
  282 +
/*
 * <4-level variant: the pud is folded into the pgd, which appears to be
 * why the decoded view goes through pgd_val()/native_make_pgd() here.
 * NOTE(review): confirm this is intentional (folded pud_t wraps a
 * pgd_t) and not a copy-paste of the pgd event's format line.
 */
TRACE_EVENT(xen_mmu_set_pud,
	    TP_PROTO(pud_t *pudp, pud_t pudval),
	    TP_ARGS(pudp, pudval),
	    TP_STRUCT__entry(
		    __field(pud_t *, pudp)
		    __field(pudval_t, pudval)
		    ),
	    TP_fast_assign(__entry->pudp = pudp;
			   __entry->pudval = native_pud_val(pudval)),
	    TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
		      __entry->pudp,
		      (int)sizeof(pudval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pudval)),
		      (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
	);
  297 +
  298 +#endif
  299 +
/* pgd being cleared; only the pointer is recorded. */
TRACE_EVENT(xen_mmu_pgd_clear,
	    TP_PROTO(pgd_t *pgdp),
	    TP_ARGS(pgdp),
	    TP_STRUCT__entry(
		    __field(pgd_t *, pgdp)
		    ),
	    TP_fast_assign(__entry->pgdp = pgdp),
	    TP_printk("pgdp %p", __entry->pgdp)
	);
  309 +
  310 +TRACE_EVENT(xen_mmu_ptep_modify_prot_start,
  311 + TP_PROTO(struct mm_struct *mm, unsigned long addr,
  312 + pte_t *ptep, pte_t pteval),
  313 + TP_ARGS(mm, addr, ptep, pteval),
  314 + TP_STRUCT__entry(
  315 + __field(struct mm_struct *, mm)
  316 + __field(unsigned long, addr)
  317 + __field(pte_t *, ptep)
  318 + __field(pteval_t, pteval)
  319 + ),
  320 + TP_fast_assign(__entry->mm = mm;
  321 + __entry->addr = addr;
  322 + __entry->ptep = ptep;
  323 + __entry->pteval = pteval.pte),
  324 + TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
  325 + __entry->mm, __entry->addr, __entry->ptep,
  326 + (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
  327 + (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
  328 + );
  329 +
  330 +TRACE_EVENT(xen_mmu_ptep_modify_prot_commit,
  331 + TP_PROTO(struct mm_struct *mm, unsigned long addr,
  332 + pte_t *ptep, pte_t pteval),
  333 + TP_ARGS(mm, addr, ptep, pteval),
  334 + TP_STRUCT__entry(
  335 + __field(struct mm_struct *, mm)
  336 + __field(unsigned long, addr)
  337 + __field(pte_t *, ptep)
  338 + __field(pteval_t, pteval)
  339 + ),
  340 + TP_fast_assign(__entry->mm = mm;
  341 + __entry->addr = addr;
  342 + __entry->ptep = ptep;
  343 + __entry->pteval = pteval.pte),
  344 + TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
  345 + __entry->mm, __entry->addr, __entry->ptep,
  346 + (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
  347 + (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
  348 + );
  349 +
  350 +
128 351 #endif /* _TRACE_XEN_H */
129 352  
130 353 /* This part must be outside protection */