Revert "kthread: add kthread_work tracepoints"

This reverts commit 0944044e57.
This commit is contained in:
Ksawlii 2024-11-24 00:23:23 +01:00
parent e6affe90e3
commit 9629f5d149
2 changed files with 0 additions and 93 deletions

View file

@@ -5,7 +5,6 @@
#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H
#include <linux/kthread.h>
#include <linux/sched/numa_balancing.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
@@ -52,89 +51,6 @@ TRACE_EVENT(sched_kthread_stop_ret,
TP_printk("ret=%d", __entry->ret)
);
/**
* sched_kthread_work_queue_work - called when a work gets queued
* @worker: pointer to the kthread_worker
* @work: pointer to struct kthread_work
*
* This event occurs when a work is queued immediately or once a
* delayed work is actually queued (ie: once the delay has been
* reached).
*/
TRACE_EVENT(sched_kthread_work_queue_work,
TP_PROTO(struct kthread_worker *worker,
struct kthread_work *work),
TP_ARGS(worker, work),
TP_STRUCT__entry(
__field( void *, work )
__field( void *, function)
__field( void *, worker)
),
TP_fast_assign(
__entry->work = work;
__entry->function = work->func;
__entry->worker = worker;
),
TP_printk("work struct=%p function=%ps worker=%p",
__entry->work, __entry->function, __entry->worker)
);
/**
* sched_kthread_work_execute_start - called immediately before the work callback
* @work: pointer to struct kthread_work
*
* Allows to track kthread work execution.
*/
TRACE_EVENT(sched_kthread_work_execute_start,
TP_PROTO(struct kthread_work *work),
TP_ARGS(work),
TP_STRUCT__entry(
__field( void *, work )
__field( void *, function)
),
TP_fast_assign(
__entry->work = work;
__entry->function = work->func;
),
TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
/**
* sched_kthread_work_execute_end - called immediately after the work callback
* @work: pointer to struct work_struct
* @function: pointer to worker function
*
* Allows to track workqueue execution.
*/
TRACE_EVENT(sched_kthread_work_execute_end,
TP_PROTO(struct kthread_work *work, kthread_work_func_t function),
TP_ARGS(work, function),
TP_STRUCT__entry(
__field( void *, work )
__field( void *, function)
),
TP_fast_assign(
__entry->work = work;
__entry->function = function;
),
TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
/*
* Tracepoint for waking up a task:
*/

View file

@@ -774,15 +774,8 @@ repeat:
raw_spin_unlock_irq(&worker->lock);
if (work) {
kthread_work_func_t func = work->func;
__set_current_state(TASK_RUNNING);
trace_sched_kthread_work_execute_start(work);
work->func(work);
/*
* Avoid dereferencing work after this point. The trace
* event only cares about the address.
*/
trace_sched_kthread_work_execute_end(work, func);
} else if (!freezing(current))
schedule();
@@ -911,8 +904,6 @@ static void kthread_insert_work(struct kthread_worker *worker,
{
kthread_insert_work_sanity_check(worker, work);
trace_sched_kthread_work_queue_work(worker, work);
list_add_tail(&work->node, pos);
work->worker = worker;
if (!worker->current_work && likely(worker->task))