libperf: Add perf_thread_map__new_dummy() function

Moving the following functions:

  thread_map__new_dummy()
  thread_map__realloc()
  thread_map__set_pid()

to libperf with the following names:

  perf_thread_map__new_dummy()
  perf_thread_map__realloc()
  perf_thread_map__set_pid()

The other two functions are dependencies of the
perf_thread_map__new_dummy() function.

The perf_thread_map__realloc() function is not exported.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-33-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
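
For reference, here is a minimal sketch of what the moved code on the
libperf side may look like, reconstructed from the hunks removed from
tools/perf/util/thread_map.c below. The destination file, the header
names (<perf/threadmap.h>, <internal/threadmap.h>), the new name of the
static reset helper and the perf_thread_map__set_pid() body are
assumptions, as they are not shown in this excerpt:

  /*
   * Sketch only: reconstructed from the removed thread_map__realloc() and
   * thread_map__new_dummy() hunks below; header names, the reset helper's
   * new name and the set_pid() body are assumptions.
   */
  #include <stdlib.h>
  #include <string.h>
  #include <sys/types.h>
  #include <linux/refcount.h>
  #include <internal/threadmap.h>
  #include <perf/threadmap.h>

  static void perf_thread_map__reset(struct perf_thread_map *map, int start, int nr)
  {
          size_t size = (nr - start) * sizeof(map->map[0]);

          /* Zero only the newly added entries and mark no errored thread. */
          memset(&map->map[start], 0, size);
          map->err_thread = -1;
  }

  /* Kept internal to libperf: this one is not exported. */
  struct perf_thread_map *perf_thread_map__realloc(struct perf_thread_map *map, int nr)
  {
          size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
          int start = map ? map->nr : 0;

          map = realloc(map, size);
          /*
           * We only realloc to add more items, let's reset new items.
           */
          if (map)
                  perf_thread_map__reset(map, start, nr);

          return map;
  }

  #define thread_map__alloc(__nr) perf_thread_map__realloc(NULL, __nr)

  void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid)
  {
          map->map[thread].pid = pid;
  }

  struct perf_thread_map *perf_thread_map__new_dummy(void)
  {
          struct perf_thread_map *threads = thread_map__alloc(1);

          if (threads != NULL) {
                  /* A dummy map has a single entry covering "any thread". */
                  perf_thread_map__set_pid(threads, 0, -1);
                  threads->nr = 1;
                  refcount_set(&threads->refcnt, 1);
          }
          return threads;
  }
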
@@ -28,30 +28,7 @@ static int filter(const struct dirent *dir)
return 1;
}
static void thread_map__reset(struct perf_thread_map *map, int start, int nr)
{
size_t size = (nr - start) * sizeof(map->map[0]);
memset(&map->map[start], 0, size);
map->err_thread = -1;
}
static struct perf_thread_map *thread_map__realloc(struct perf_thread_map *map, int nr)
{
size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
int start = map ? map->nr : 0;
map = realloc(map, size);
/*
* We only realloc to add more items, let's reset new items.
*/
if (map)
thread_map__reset(map, start, nr);
return map;
}
#define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr)
#define thread_map__alloc(__nr) perf_thread_map__realloc(NULL, __nr)
struct perf_thread_map *thread_map__new_by_pid(pid_t pid)
{
@@ -69,7 +46,7 @@ struct perf_thread_map *thread_map__new_by_pid(pid_t pid)
threads = thread_map__alloc(items);
if (threads != NULL) {
for (i = 0; i < items; i++)
thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
perf_thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
threads->nr = items;
refcount_set(&threads->refcnt, 1);
}
@@ -86,7 +63,7 @@ struct perf_thread_map *thread_map__new_by_tid(pid_t tid)
struct perf_thread_map *threads = thread_map__alloc(1);
if (threads != NULL) {
thread_map__set_pid(threads, 0, tid);
perf_thread_map__set_pid(threads, 0, tid);
threads->nr = 1;
refcount_set(&threads->refcnt, 1);
}
@@ -142,7 +119,7 @@ static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid)
if (grow) {
struct perf_thread_map *tmp;
tmp = thread_map__realloc(threads, max_threads);
tmp = perf_thread_map__realloc(threads, max_threads);
if (tmp == NULL)
goto out_free_namelist;
@@ -150,8 +127,8 @@ static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid)
}
for (i = 0; i < items; i++) {
thread_map__set_pid(threads, threads->nr + i,
atoi(namelist[i]->d_name));
perf_thread_map__set_pid(threads, threads->nr + i,
atoi(namelist[i]->d_name));
}
for (i = 0; i < items; i++)
@@ -233,14 +210,14 @@ static struct perf_thread_map *thread_map__new_by_pid_str(const char *pid_str)
goto out_free_threads;
total_tasks += items;
nt = thread_map__realloc(threads, total_tasks);
nt = perf_thread_map__realloc(threads, total_tasks);
if (nt == NULL)
goto out_free_namelist;
threads = nt;
for (i = 0; i < items; i++) {
thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
perf_thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
zfree(&namelist[i]);
}
threads->nr = total_tasks;
@@ -263,18 +240,6 @@ out_free_threads:
goto out;
}
struct perf_thread_map *thread_map__new_dummy(void)
{
struct perf_thread_map *threads = thread_map__alloc(1);
if (threads != NULL) {
thread_map__set_pid(threads, 0, -1);
threads->nr = 1;
refcount_set(&threads->refcnt, 1);
}
return threads;
}
struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str)
{
struct perf_thread_map *threads = NULL, *nt;
@@ -287,7 +252,7 @@ struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str)
/* perf-stat expects threads to be generated even if tid not given */
if (!tid_str)
return thread_map__new_dummy();
return perf_thread_map__new_dummy();
slist = strlist__new(tid_str, &slist_config);
if (!slist)
@@ -304,13 +269,13 @@ struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str)
continue;
ntasks++;
nt = thread_map__realloc(threads, ntasks);
nt = perf_thread_map__realloc(threads, ntasks);
if (nt == NULL)
goto out_free_threads;
threads = nt;
thread_map__set_pid(threads, ntasks - 1, tid);
perf_thread_map__set_pid(threads, ntasks - 1, tid);
threads->nr = ntasks;
}
out:
@@ -437,7 +402,7 @@ static void thread_map__copy_event(struct perf_thread_map *threads,
threads->nr = (int) event->nr;
for (i = 0; i < event->nr; i++) {
thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid);
perf_thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid);
threads->map[i].comm = strndup(event->entries[i].comm, 16);
}