[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-arm] [Qemu-devel] [PATCH v8 04/16] gdbstub: add multiprocess support to 'H' and 'T' packets
From: Alistair Francis
Subject: Re: [Qemu-arm] [Qemu-devel] [PATCH v8 04/16] gdbstub: add multiprocess support to 'H' and 'T' packets
Date: Fri, 7 Dec 2018 16:55:58 -0800
On Fri, Dec 7, 2018 at 1:02 AM Luc Michel <address@hidden> wrote:
>
> Add a couple of helper functions to cope with GDB threads and processes.
>
> The gdb_get_process() function looks for a process given a pid.
>
> The gdb_get_cpu() function returns the CPU corresponding to the (pid,
> tid) pair given as parameters.
>
> The read_thread_id() function parses the thread-id sent by the peer.
> This function supports the multiprocess extension thread-id syntax. The
> return value specifies if the parsing failed, or if a special case was
> encountered (all processes or all threads).
>
> Use them in 'H' and 'T' packets handling to support the multiprocess
> extension.
>
> Signed-off-by: Luc Michel <address@hidden>
> Reviewed-by: Philippe Mathieu-Daudé <address@hidden>
> Reviewed-by: Edgar E. Iglesias <address@hidden>
Acked-by: Alistair Francis <address@hidden>
Alistair
> ---
> gdbstub.c | 154 +++++++++++++++++++++++++++++++++++++++++++++++-------
> 1 file changed, 136 insertions(+), 18 deletions(-)
>
> diff --git a/gdbstub.c b/gdbstub.c
> index 07508c2e6b..911faa225a 100644
> --- a/gdbstub.c
> +++ b/gdbstub.c
> @@ -685,10 +685,75 @@ out:
> /* TODO: In user mode, we should use the task state PID */
> return s->processes[s->process_num - 1].pid;
> #endif
> }
>
> +static GDBProcess *gdb_get_process(const GDBState *s, uint32_t pid)
> +{
> + int i;
> +
> + if (!pid) {
> + /* 0 means any process, we take the first one */
> + return &s->processes[0];
> + }
> +
> + for (i = 0; i < s->process_num; i++) {
> + if (s->processes[i].pid == pid) {
> + return &s->processes[i];
> + }
> + }
> +
> + return NULL;
> +}
> +
> +static GDBProcess *gdb_get_cpu_process(const GDBState *s, CPUState *cpu)
> +{
> + return gdb_get_process(s, gdb_get_cpu_pid(s, cpu));
> +}
> +
> +static CPUState *find_cpu(uint32_t thread_id)
> +{
> + CPUState *cpu;
> +
> + CPU_FOREACH(cpu) {
> + if (cpu_gdb_index(cpu) == thread_id) {
> + return cpu;
> + }
> + }
> +
> + return NULL;
> +}
> +
> +static CPUState *gdb_get_cpu(const GDBState *s, uint32_t pid, uint32_t tid)
> +{
> + GDBProcess *process;
> + CPUState *cpu;
> +
> + if (!tid) {
> + /* 0 means any thread, we take the first one */
> + tid = 1;
> + }
> +
> + cpu = find_cpu(tid);
> +
> + if (cpu == NULL) {
> + return NULL;
> + }
> +
> + process = gdb_get_cpu_process(s, cpu);
> +
> + if (process->pid != pid) {
> + return NULL;
> + }
> +
> + if (!process->attached) {
> + return NULL;
> + }
> +
> + return cpu;
> +}
> +
> static const char *get_feature_xml(const char *p, const char **newp,
> CPUClass *cc)
> {
> size_t len;
> int i;
> @@ -941,23 +1006,10 @@ static void gdb_set_cpu_pc(GDBState *s, target_ulong
> pc)
>
> cpu_synchronize_state(cpu);
> cpu_set_pc(cpu, pc);
> }
>
> -static CPUState *find_cpu(uint32_t thread_id)
> -{
> - CPUState *cpu;
> -
> - CPU_FOREACH(cpu) {
> - if (cpu_gdb_index(cpu) == thread_id) {
> - return cpu;
> - }
> - }
> -
> - return NULL;
> -}
> -
> static char *gdb_fmt_thread_id(const GDBState *s, CPUState *cpu,
> char *buf, size_t buf_size)
> {
> if (s->multiprocess) {
> snprintf(buf, buf_size, "p%02x.%02x",
> @@ -967,10 +1019,64 @@ static char *gdb_fmt_thread_id(const GDBState *s,
> CPUState *cpu,
> }
>
> return buf;
> }
>
> +typedef enum GDBThreadIdKind {
> + GDB_ONE_THREAD = 0,
> + GDB_ALL_THREADS, /* One process, all threads */
> + GDB_ALL_PROCESSES,
> + GDB_READ_THREAD_ERR
> +} GDBThreadIdKind;
> +
> +static GDBThreadIdKind read_thread_id(const char *buf, const char **end_buf,
> + uint32_t *pid, uint32_t *tid)
> +{
> + unsigned long p, t;
> + int ret;
> +
> + if (*buf == 'p') {
> + buf++;
> + ret = qemu_strtoul(buf, &buf, 16, &p);
> +
> + if (ret) {
> + return GDB_READ_THREAD_ERR;
> + }
> +
> + /* Skip '.' */
> + buf++;
> + } else {
> + p = 1;
> + }
> +
> + ret = qemu_strtoul(buf, &buf, 16, &t);
> +
> + if (ret) {
> + return GDB_READ_THREAD_ERR;
> + }
> +
> + *end_buf = buf;
> +
> + if (p == -1) {
> + return GDB_ALL_PROCESSES;
> + }
> +
> + if (pid) {
> + *pid = p;
> + }
> +
> + if (t == -1) {
> + return GDB_ALL_THREADS;
> + }
> +
> + if (tid) {
> + *tid = t;
> + }
> +
> + return GDB_ONE_THREAD;
> +}
> +
> static int is_query_packet(const char *p, const char *query, char separator)
> {
> unsigned int query_len = strlen(query);
>
> return strncmp(p, query, query_len) == 0 &&
> @@ -1075,16 +1181,18 @@ static int gdb_handle_packet(GDBState *s, const char
> *line_buf)
> {
> CPUState *cpu;
> CPUClass *cc;
> const char *p;
> uint32_t thread;
> + uint32_t pid, tid;
> int ch, reg_size, type, res;
> uint8_t mem_buf[MAX_PACKET_LENGTH];
> char buf[sizeof(mem_buf) + 1 /* trailing NUL */];
> char thread_id[16];
> uint8_t *registers;
> target_ulong addr, len;
> + GDBThreadIdKind thread_kind;
>
> trace_gdbstub_io_command(line_buf);
>
> p = line_buf;
> ch = *p++;
> @@ -1288,16 +1396,22 @@ static int gdb_handle_packet(GDBState *s, const char
> *line_buf)
> else
> put_packet(s, "E22");
> break;
> case 'H':
> type = *p++;
> - thread = strtoull(p, (char **)&p, 16);
> - if (thread == -1 || thread == 0) {
> +
> + thread_kind = read_thread_id(p, &p, &pid, &tid);
> + if (thread_kind == GDB_READ_THREAD_ERR) {
> + put_packet(s, "E22");
> + break;
> + }
> +
> + if (thread_kind != GDB_ONE_THREAD) {
> put_packet(s, "OK");
> break;
> }
> - cpu = find_cpu(thread);
> + cpu = gdb_get_cpu(s, pid, tid);
> if (cpu == NULL) {
> put_packet(s, "E22");
> break;
> }
> switch (type) {
> @@ -1313,12 +1427,16 @@ static int gdb_handle_packet(GDBState *s, const char
> *line_buf)
> put_packet(s, "E22");
> break;
> }
> break;
> case 'T':
> - thread = strtoull(p, (char **)&p, 16);
> - cpu = find_cpu(thread);
> + thread_kind = read_thread_id(p, &p, &pid, &tid);
> + if (thread_kind == GDB_READ_THREAD_ERR) {
> + put_packet(s, "E22");
> + break;
> + }
> + cpu = gdb_get_cpu(s, pid, tid);
>
> if (cpu != NULL) {
> put_packet(s, "OK");
> } else {
> put_packet(s, "E22");
> --
> 2.19.2
>
>
- [Qemu-arm] [PATCH v8 00/16] gdbstub: support for the multiprocess extension, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 09/16] gdbstub: add multiprocess support to gdb_vm_state_change(), Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 06/16] gdbstub: add multiprocess support to 'sC' packets, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 07/16] gdbstub: add multiprocess support to (f|s)ThreadInfo and ThreadExtraInfo, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 03/16] gdbstub: add multiprocess support to '?' packets, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 02/16] gdbstub: introduce GDB processes, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 12/16] gdbstub: add support for vAttach packets, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 04/16] gdbstub: add multiprocess support to 'H' and 'T' packets, Luc Michel, 2018/12/07
- Re: [Qemu-arm] [Qemu-devel] [PATCH v8 04/16] gdbstub: add multiprocess support to 'H' and 'T' packets,
Alistair Francis <=
- [Qemu-arm] [PATCH v8 08/16] gdbstub: add multiprocess support to Xfer:features:read:, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 05/16] gdbstub: add multiprocess support to vCont packets, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 01/16] hw/cpu: introduce CPU clusters, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 10/16] gdbstub: add multiprocess support to 'D' packets, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 13/16] gdbstub: processes initialization on new peer connection, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 16/16] arm/xlnx-zynqmp: put APUs and RPUs in separate CPU clusters, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 14/16] gdbstub: gdb_set_stop_cpu: ignore request when process is not attached, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 11/16] gdbstub: add support for extended mode packet, Luc Michel, 2018/12/07
- [Qemu-arm] [PATCH v8 15/16] gdbstub: add multiprocess extension support, Luc Michel, 2018/12/07
- Re: [Qemu-arm] [PATCH v8 00/16] gdbstub: support for the multiprocess extension, Luc Michel, 2018/12/17