#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "tests.h"

#include <signal.h>
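
/*
 * exited is set from the signal handlers below; nr_exit counts the
 * PERF_RECORD_EXIT records read back from the ring buffer.
 */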
static int exited;
static int nr_exit;
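
/*
 * Called on SIGCHLD when the forked workload exits (see the signal() call
 * in test__task_exit() below).
 */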
static void sig_handler(int sig __maybe_unused)
{
	exited = 1;
}

/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
 * we asked for it by setting its exec_error to this handler.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info __maybe_unused,
					void *ucontext __maybe_unused)
{
	exited = 1;
	nr_exit = -1;
}

/*
 * This test will start a workload that does nothing, then it checks whether
 * the number of exit events reported by the kernel is 1, i.e. that the
 * kernel returns the correct number of events.
 */
int test__task_exit(int subtest __maybe_unused)
{
	int err = -1;
	union perf_event *event;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist;
	struct target target = {
		.uid		= UINT_MAX,
		.uses_mmap	= true,
	};
	const char *argv[] = { "true", NULL };
	char sbuf[STRERR_BUFSIZE];
	struct cpu_map *cpus;
	struct thread_map *threads;
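
	/* The forked workload will send SIGCHLD when it exits. */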
	signal(SIGCHLD, sig_handler);

	evlist = perf_evlist__new_default();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new_default\n");
		return -1;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	cpus = cpu_map__dummy_new();
	threads = thread_map__new_by_tid(-1);
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(evlist, cpus, threads);
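
	/*
	 * The evlist takes over the caller's references to the maps, so
	 * clear the locals to keep out_free_maps from putting them again.
	 */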
	cpus	= NULL;
	threads = NULL;

	err = perf_evlist__prepare_workload(evlist, &target, argv, false,
					    workload_exec_failed_signal);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}
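
	/*
	 * Ask for task (fork/exit) events on the default evsel, with
	 * sampling, inheritance and kernel events disabled, so the ring
	 * buffer should only carry the task records for the workload.
	 */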
	evsel = perf_evlist__first(evlist);
	evsel->attr.task = 1;
	evsel->attr.sample_freq = 0;
	evsel->attr.inherit = 0;
	evsel->attr.watermark = 0;
	evsel->attr.wakeup_events = 1;
	evsel->attr.exclude_kernel = 1;

	err = perf_evlist__open(evlist);
	if (err < 0) {
		pr_debug("Couldn't open the evlist: %s\n",
			 strerror_r(-err, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}
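
	/* Map the ring buffer so the exit events can be read back below. */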
	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	perf_evlist__start_workload(evlist);
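
	/*
	 * Drain the ring buffer counting PERF_RECORD_EXIT records, then block
	 * in poll() and retry until the workload has exited and at least one
	 * exit event has been seen.
	 */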
retry:
	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		if (event->header.type == PERF_RECORD_EXIT)
			nr_exit++;

		perf_evlist__mmap_consume(evlist, 0);
	}

	if (!exited || !nr_exit) {
		perf_evlist__poll(evlist, -1);
		goto retry;
	}

	if (nr_exit != 1) {
		pr_debug("received %d EXIT records\n", nr_exit);
		err = -1;
	}
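
	/*
	 * On the normal path cpus and threads were handed to the evlist and
	 * NULLed above; they are only still set here if map creation failed.
	 */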
out_free_maps:
	cpu_map__put(cpus);
	thread_map__put(threads);
out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}