diff -Nru ltrace-0.7.3/debian/changelog ltrace-0.7.3/debian/changelog --- ltrace-0.7.3/debian/changelog 2023-02-25 19:40:34.000000000 +0100 +++ ltrace-0.7.3/debian/changelog 2024-02-02 13:13:08.000000000 +0100 @@ -1,3 +1,20 @@ +ltrace (0.7.3-6.4ubuntu1) noble; urgency=medium + + * Merge from Debian unstable. Remaining changes: + - Set architecture to linux-any. + - sysdeps/linux-gnu/trace.c: try to make PTRACE scope sysctl more + discoverable. + - Define _LARGEFILE_SOURCE and _LARGEFILE64_SOURCE in debian/rules CFLAGS. + The configure script has a bug where it can't properly cope with the need + to define these before detecting the use of elfutils. + - Add patch to support arm64. + - Add patch to fix ltrace on binaries compiled on recent Ubuntu releases (LP: #1992939) + - Add various patches to support ppc64el. + * Dropped changes, no longer needed: + - Include cdbs' autoreconf rules to generate new Makefiles + + -- Ravi Kant Sharma Fri, 02 Feb 2024 13:13:08 +0100 + ltrace (0.7.3-6.4) unstable; urgency=medium * Non-maintainer upload. @@ -31,6 +48,59 @@ -- Guilherme de Paula Xavier Segundo Wed, 30 Mar 2022 20:48:13 -0300 +ltrace (0.7.3-6.1ubuntu7) noble; urgency=medium + + * Fix ltrace on binaries compiled on recent Ubuntu releases (LP: #1992939) + - debian/patches/lp1992939-add-intel-cet-support.patch, thanks to DJ Delorie + + -- Ravi Kant Sharma Fri, 08 Dec 2023 14:54:53 +0100 + +ltrace (0.7.3-6.1ubuntu6) jammy; urgency=medium + + * Fix backport of upstream patch for ppc64el support (LP: #1967518) + + -- Olivier Gayot Fri, 01 Apr 2022 12:28:12 +0200 + +ltrace (0.7.3-6.1ubuntu5) jammy; urgency=medium + + * Remove use of cdbs which is awful and must die. + + -- Steve Langasek Wed, 23 Mar 2022 22:52:13 -0700 + +ltrace (0.7.3-6.1ubuntu4) jammy; urgency=medium + + * Update debhelper compat to 12. Closes: #965705. + + -- Steve Langasek Wed, 23 Mar 2022 20:51:14 -0700 + +ltrace (0.7.3-6.1ubuntu3) impish; urgency=medium + + * No-change rebuild to build packages with zstd compression. + + -- Matthias Klose Thu, 07 Oct 2021 12:20:25 +0200 + +ltrace (0.7.3-6.1ubuntu2) groovy; urgency=medium + + * No-change rebuild to bring back i386 binaries. + + -- Steve Langasek Mon, 29 Jun 2020 08:28:43 -0700 + +ltrace (0.7.3-6.1ubuntu1) disco; urgency=low + + * Merge from Debian unstable. Remaining changes: + - Set architecture to linux-any. + - sysdeps/linux-gnu/trace.c: try to make PTRACE scope sysctl more + discoverable. + - Define _LARGEFILE_SOURCE and _LARGEFILE64_SOURCE in debian/rules CFLAGS. + The configure script has a bug where it can't properly cope with the need + to define these before detecting the use of elfutils. + - Add patch to support arm64. + - Include cdbs' autoreconf rules to generate new Makefiles + - Add various patches to support ppc64el. + - Build-depend on dh-autoreconf instead of autotools-dev. + + -- Steve Langasek Thu, 31 Jan 2019 16:10:30 -0800 + ltrace (0.7.3-6.1) unstable; urgency=medium * Non-maintainer upload. @@ -39,12 +109,108 @@ -- Adrian Bunk Mon, 20 Aug 2018 09:09:32 +0300 +ltrace (0.7.3-6ubuntu1) zesty; urgency=low + + * Merge from Debian unstable. Remaining changes: + - Set architecture to linux-any. + - sysdeps/linux-gnu/trace.c: try to make PTRACE scope sysctl more + discoverable. + - Define _LARGEFILE_SOURCE and _LARGEFILE64_SOURCE in debian/rules CFLAGS. + The configure script has a bug where it can't properly cope with the need + to define these before detecting the use of elfutils. + - Add patch to support arm64. 
+ - Include cdbs' autoreconf rules to generate new Makefiles + - Add various patches to support ppc64el. + - Build-depend on dh-autoreconf instead of autotools-dev. + + -- Steve Langasek Wed, 12 Oct 2016 23:20:41 -0700 + ltrace (0.7.3-6) unstable; urgency=medium * Fixed compilation error in amd64: readdir_r is deprecated (closes: #837011) -- Juan Cespedes Wed, 21 Sep 2016 13:15:35 +0200 +ltrace (0.7.3-5.1ubuntu4) xenial; urgency=medium + + * debian/patches/LP1547152.diff: removed. + * Backport more ppc64el fixes to fix tracing on PPC64; backporting the + required bits and pieces from the rest of ltrace (LP: #1547152, #1398143) + - add_irelative_tracing_b420a226.patch: add support for IRELATIVE tracing. + - find_irelative_b061bae3.patch: split the function + linux_elf_find_irelative_name out of linux_elf_add_plt_entry_irelative + - keep_plt_reloc_in_vector_673ff510.patch: keep PLT relocs in a vector. + - add_elf_each_symbol_7a29f9e7.patch: add elf_each_symbol function for + iteration. + - add_elf_can_read_next_5c37171a.patch: add the elf_can_read_next method. + - add_elf_read_next_u_439ab5bf.patch: add methods for doing stream-like + reads for various types. + - add_elf_read_u8_3c636fb7.patch: add read for u8. + - elf_read_uleb128_184779e4.patch: add elf_read_*_uleb128. + - elf_load_dynamic_entry_4f2f66e6.patch: add function load_dynamic_entry. + - arm_attr_decoding_df7d2311.patch: implement ARM attribute decoding, + this can determine when hardfp is used in the process. + - arm_fetch_backend_97a25160.patch: add fetch backend for float and double + return values on ARM. + - arm_backend_fixes_1383e5bd.patch: misc ARM backend fixes. + - arm_bpc_62fc7747.patch: implement Base Procedure Call Standard. + - arm_vfp_params_1c8596d4.patch: implement VFP parameter passing for ARM. + - arm_vararg_without_vfp_88a0fe50.patch: we need to handle varargs in ARM + without VFP. + - arm_plt_rel_9e33f5ac.patch: unbreak ARM wrt the previous patch, relplt + got removed from struct rtelf; so fix this to still work. + - dont_ltelf_destroy_if_init_fails_0ba3c5ee.patch: don't call + ltelf_destroy if ltelf_init fails (ie. for ENOENT). + - ppc64el.diff: backported eea4ad2c to replace the patch that was already + there, as it includes support for irelative and wchar. + - jmp_irel.patch: backport 73b85aad: support tracing P_PPC64_JMP_IREL. + - ppc64le-fixes.patch: more misc backports for ppc64 fixes, patch from + Fedora packaging git. + + [35a9677d] fix bugs in fetch backend of powerpc64le + + [a46c07fc] Fix coding style in PowerPC's arch.h + + [44789e1e] PowerPC: convert ELFv2 conditionals from preprocessor to + plain conditions. + - ppc64-fork.patch: backport 35742523: Fix tracing across fork on PPC64. + - on_install_breakpoint_56134ff5.patch: ensure we do have the on_install + breakpoint needed for the unprelink patch. + - ppc64-unprelink.patch: backport a0093ca4: Don't crash untraced calls via + PLT in prelinked PPC64 binaries. + - ppc-bias.patch: backport three commits for bias and unresolved breakports + in PPC: + + [bf821009] Fix address biasing in PPC backend + + [d80c5371] Fix cloning of PPC_PLT_NEED_UNRESOLVE breakpoints + + [d8f1287b] Nits + + -- Mathieu Trudel-Lapierre Wed, 06 Apr 2016 18:58:54 -0400 + +ltrace (0.7.3-5.1ubuntu3) xenial; urgency=medium + + * debian/patches/LP1547152.diff: add support for ppc64el. + Thanks to Thierry Fauck . Closes LP: 1547152. 
+ + -- Steve Langasek Wed, 09 Mar 2016 06:39:17 -0800 + +ltrace (0.7.3-5.1ubuntu2) xenial; urgency=medium + + * Build-depend on dh-autoreconf instead of autotools-dev. + + -- Matthias Klose Thu, 07 Jan 2016 10:42:16 +0100 + +ltrace (0.7.3-5.1ubuntu1) xenial; urgency=medium + + * Merge with Debian; remaining changes: + - Set architecture to linux-any. + - sysdeps/linux-gnu/trace.c: try to make PTRACE scope sysctl more + discoverable. + - Define _LARGEFILE_SOURCE and _LARGEFILE64_SOURCE in debian/rules CFLAGS. + The configure script has a bug where it can't properly cope with the need + to define these before detecting the use of elfutils. + - Add patch to support arm64. + - Include cdbs' autoreconf rules to generate new Makefiles + - Add patch to support ppc64el. + + -- Matthias Klose Thu, 07 Jan 2016 10:07:17 +0100 + ltrace (0.7.3-5.1) unstable; urgency=medium * Non-maintainer upload. @@ -62,6 +228,57 @@ -- Juan Cespedes Fri, 03 Jul 2015 17:16:04 +0200 +ltrace (0.7.3-4ubuntu7) vivid; urgency=medium + + * Fix build with GCC 5. + + -- Matthias Klose Fri, 06 Mar 2015 17:11:13 +0100 + +ltrace (0.7.3-4ubuntu6) utopic; urgency=medium + + * debian/ptrace.diff: updated to restore PTRACE scope sysctl warning + (LP: #1317136) + + -- Marc Deslauriers Wed, 07 May 2014 15:32:12 -0400 + +ltrace (0.7.3-4ubuntu5) trusty; urgency=medium + + * Build-depend on dh-autoreconf. + + -- Matthias Klose Thu, 20 Mar 2014 16:18:36 +0100 + +ltrace (0.7.3-4ubuntu4) trusty; urgency=medium + + * Add patch to support arm64 (LP: #1292089). + * Include cdbs' autoreconf rules to generate new Makefiles + + -- dann frazier Thu, 13 Mar 2014 10:17:20 -0600 + +ltrace (0.7.3-4ubuntu3) trusty; urgency=medium + + * Update patch to support ppc64el. + + -- Matthias Klose Mon, 24 Feb 2014 16:37:49 +0100 + +ltrace (0.7.3-4ubuntu2) trusty; urgency=medium + + * Add patch to support ppc64el. + + -- Matthias Klose Thu, 09 Jan 2014 16:10:19 +0100 + +ltrace (0.7.3-4ubuntu1) trusty; urgency=medium + + * Merge with Debian; remaining changes: + - Set architecture to linux-any. + - sysdeps/linux-gnu/trace.c: try to make PTRACE scope sysctl more + discoverable. + - Use libelf-dev instead of libelfg0-dev + - Define _LARGEFILE_SOURCE and _LARGEFILE64_SOURCE in debian/rules CFLAGS. + The configure script has a bug where it can't properly cope with the need + to define these before detecting the use of elfutils. + + -- Matthias Klose Mon, 06 Jan 2014 12:57:31 +0100 + ltrace (0.7.3-4) unstable; urgency=low * Get rid of dh_autoreconf (not needed) @@ -113,6 +330,32 @@ -- David Prévot Sat, 21 Dec 2013 19:21:35 -0400 +ltrace (0.5.3-2.1ubuntu3) quantal; urgency=low + + * Rebuild for new armel compiler default of ARMv5t. + + -- Colin Watson Tue, 02 Oct 2012 16:36:36 +0100 + +ltrace (0.5.3-2.1ubuntu2) precise; urgency=low + + * Build for armhf. + + -- Matthias Klose Mon, 05 Dec 2011 16:43:20 +0100 + +ltrace (0.5.3-2.1ubuntu1) natty; urgency=low + + * Merge with Debian; remaining changes: + - Fix ARM syscall_p to handle Thumb-2 mode (Zach Welch). LP: #639796. + - sysdeps/linux-gnu/trace.c: adjust sysctl hint to include new yama path. + - sysdeps/linux-gnu/trace.c: try to make PTRACE scope sysctl more + discoverable. + - Use libelf-dev instead of libelfg0-dev + - Define _LARGEFILE_SOURCE and _LARGEFILE64_SOURCE in debian/rules CFLAGS. + The configure script has a bug where it can't properly cope with the need + to define these before detecting the use of elfutils. 
+ + -- Matthias Klose Wed, 24 Nov 2010 17:58:11 +0100 + ltrace (0.5.3-2.1) unstable; urgency=low * Non-maintainer upload. @@ -121,6 +364,49 @@ -- Jakub Wilk Sun, 02 May 2010 10:27:16 +0200 +ltrace (0.5.3-2ubuntu6) maverick; urgency=low + + * Fix ARM syscall_p to handle Thumb-2 mode (Zach Welch). LP: #639796. + + -- Matthias Klose Fri, 01 Oct 2010 16:06:14 +0200 + +ltrace (0.5.3-2ubuntu5) maverick; urgency=low + + * sysdeps/linux-gnu/trace.c: adjust sysctl hint to include new yama path. + + -- Kees Cook Tue, 06 Jul 2010 15:18:57 -0700 + +ltrace (0.5.3-2ubuntu4) maverick; urgency=low + + * sysdeps/linux-gnu/trace.c: try to make PTRACE scope sysctl more + discoverable. + + -- Kees Cook Wed, 09 Jun 2010 16:49:53 -0700 + +ltrace (0.5.3-2ubuntu3) lucid; urgency=low + + * rebuild rest of main for armel armv7/thumb2 optimization; + UbuntuSpec:mobile-lucid-arm-gcc-v7-thumb2 + + -- Alexander Sack Sun, 07 Mar 2010 00:56:41 +0100 + +ltrace (0.5.3-2ubuntu2) karmic; urgency=low + + * sysdeps/linux-gnu/ppc/plt.c: Include . + + -- Matthias Klose Sun, 27 Sep 2009 14:05:50 +0200 + +ltrace (0.5.3-2ubuntu1) karmic; urgency=low + + * Merge from debian unstable, remaining changes: LP: #404856 + - Add lpia to architecture list. + - Use libelf-dev instead of libelfg0-dev + - Define _LARGEFILE_SOURCE and _LARGEFILE64_SOURCE in debian/rules CFLAGS. + The configure script has a bug where it can't properly cope with the need + to define these before detecting the use of elfutils. + + -- Bhavani Shankar Tue, 28 Jul 2009 16:44:35 +0530 + ltrace (0.5.3-2) unstable; urgency=low * Fixed compilation problems in armel, ia64 and powerpc (closes: Bug#538441) @@ -134,6 +420,17 @@ -- Juan Cespedes Sat, 25 Jul 2009 16:24:38 +0200 +ltrace (0.5.2-2ubuntu1) karmic; urgency=low + + * Merge from debian unstable, remaining changes: + - Add lpia to architecture list. + - Use libelf-dev instead of libelfg0-dev + - Define _LARGEFILE_SOURCE and _LARGEFILE64_SOURCE in debian/rules CFLAGS. + The configure script has a bug where it can't properly cope with the need + to define these before detecting the use of elfutils. + + -- Michael Vogt Wed, 03 Jun 2009 11:03:35 +0200 + ltrace (0.5.2-2) unstable; urgency=low * Make clone() work when child starts after parent finishes @@ -150,6 +447,23 @@ -- Juan Cespedes Thu, 21 May 2009 19:16:22 +0200 +ltrace (0.5.1-2ubuntu1) jaunty; urgency=low + + [ Bhavani Shankar ] + * Merge from debian unstable, remaining changes: LP: #313530 + - Add lpia to architecture list. + - Use libelf-dev instead of libelfg0-dev + - Define _LARGEFILE_SOURCE and _LARGEFILE64_SOURCE in debian/rules CFLAGS. + The configure script has a bug where it can't properly cope with the need + to define these before detecting the use of elfutils. + + [ Colin Watson ] + * Remove stray dpatch build-dependency. + * Change libelfg0-dev to libelf-dev in debian/control.in as well as in + debian/control. + + -- Bhavani Shankar Tue, 06 Jan 2009 17:46:28 +0000 + ltrace (0.5.1-2) unstable; urgency=low * Red-added armel and armeb to debian/control (closes: Bug#463023) @@ -165,6 +479,23 @@ -- Juan Cespedes Wed, 10 Dec 2008 18:41:20 +0100 +ltrace (0.5-3.1ubuntu2) intrepid; urgency=low + + * Use libelf-dev instead of libelfg0-dev + * Define _LARGEFILE_SOURCE and _LARGEFILE64_SOURCE in debian/rules CFLAGS. + The configure script has a bug where it can't properly cope with the need + to define these before detecting the use of elfutils. 
+ + -- Ben Collins Wed, 02 Jul 2008 11:10:41 -0400 + +ltrace (0.5-3.1ubuntu1) intrepid; urgency=low + + * Merge from debian unstable, remaining changes: + - Add lpia to architecture list. + - Fix check for host_os in configure. + + -- Michael Vogt Tue, 27 May 2008 10:51:22 +0200 + ltrace (0.5-3.1) unstable; urgency=low * Non-maintainer upload. @@ -180,6 +511,15 @@ -- Riku Voipio Tue, 29 Jan 2008 00:26:50 +0200 +ltrace (0.5-3ubuntu1) hardy; urgency=low + + * Merge from debian unstable, remaining changes: + - Add lpia to architecture list. + - Set Ubuntu maintainer address. + - Fix check for host_os in configure. + + -- Michael Vogt Thu, 15 Nov 2007 12:47:01 +0100 + ltrace (0.5-3) unstable; urgency=low * Really fix compilation problems in ppc (!) @@ -208,6 +548,19 @@ -- Juan Cespedes Tue, 07 Aug 2007 11:49:27 +0200 +ltrace (0.4-1ubuntu2) gutsy; urgency=low + + * Fix check for host_os in configure. + + -- Matthias Klose Tue, 31 Jul 2007 08:13:07 +0000 + +ltrace (0.4-1ubuntu1) gutsy; urgency=low + + * Add lpia to architecture list. + * Set Ubuntu maintainer address. + + -- Matthias Klose Mon, 30 Jul 2007 23:56:37 +0200 + ltrace (0.4-1) unstable; urgency=low * Rebase code from Redhat patches, now everything lives in SVN @@ -312,7 +665,7 @@ + Added alpha support -- Juan Cespedes Mon, 14 Jun 2004 18:01:12 +0200 - + ltrace (0.3.32) unstable; urgency=low * Fixed wrong version number @@ -779,3 +1132,4 @@ * Re-structured most of the code; new files: elf.c, i386.c, trace.c -- Juan Cespedes Sat, 9 Aug 1997 20:55:24 +0200 + diff -Nru ltrace-0.7.3/debian/control ltrace-0.7.3/debian/control --- ltrace-0.7.3/debian/control 2023-02-25 19:22:06.000000000 +0100 +++ ltrace-0.7.3/debian/control 2024-02-02 13:00:59.000000000 +0100 @@ -1,12 +1,13 @@ Source: ltrace Section: utils Priority: optional -Maintainer: Juan Cespedes +Maintainer: Ubuntu Developers +XSBC-Original-Maintainer: Juan Cespedes Standards-Version: 3.9.8 Build-Depends: debhelper-compat (= 13), libiberty-dev, libelf-dev, libselinux1-dev Package: ltrace -Architecture: alpha amd64 ia64 i386 powerpc powerpcspe ppc64 s390 s390x sparc +Architecture: linux-any Depends: ${shlibs:Depends}, ${misc:Depends} Description: Tracks runtime library calls in dynamically linked programs ltrace is a debugging program which runs a specified command until it diff -Nru ltrace-0.7.3/debian/patches/add_elf_can_read_next_5c37171a.patch ltrace-0.7.3/debian/patches/add_elf_can_read_next_5c37171a.patch --- ltrace-0.7.3/debian/patches/add_elf_can_read_next_5c37171a.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/add_elf_can_read_next_5c37171a.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,57 @@ +From 5c37171a18bddfbc716d4f3da8b008a844eea4f7 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Tue, 5 Feb 2013 01:52:37 +0100 +Subject: Add elf_can_read_next + +--- + ltrace-elf.c | 10 +++++----- + ltrace-elf.h | 3 +++ + 2 files changed, 8 insertions(+), 5 deletions(-) + +Index: b/ltrace-elf.c +=================================================================== +--- a/ltrace-elf.c ++++ b/ltrace-elf.c +@@ -202,23 +202,23 @@ elf_get_section_named(struct ltelf *lte, + &name_p, &data); + } + +-static int +-need_data(Elf_Data *data, GElf_Xword offset, GElf_Xword size) ++int ++elf_can_read_next(Elf_Data *data, GElf_Xword offset, GElf_Xword size) + { + assert(data != NULL); + if (data->d_size < size || offset > data->d_size - size) { + debug(1, "Not enough data to read %"PRId64"-byte value" + " at offset %"PRId64".", size, offset); +- return -1; ++ return 0; 
+ } +- return 0; ++ return 1; + } + + #define DEF_READER(NAME, SIZE) \ + int \ + NAME(Elf_Data *data, GElf_Xword offset, uint##SIZE##_t *retp) \ + { \ +- if (need_data(data, offset, SIZE / 8) < 0) \ ++ if (!elf_can_read_next(data, offset, SIZE / 8)) \ + return -1; \ + \ + if (data->d_buf == NULL) /* NODATA section */ { \ +Index: b/ltrace-elf.h +=================================================================== +--- a/ltrace-elf.h ++++ b/ltrace-elf.h +@@ -116,6 +116,9 @@ int elf_read_next_u16(Elf_Data *data, GE + int elf_read_next_u32(Elf_Data *data, GElf_Xword *offset, uint32_t *retp); + int elf_read_next_u64(Elf_Data *data, GElf_Xword *offset, uint64_t *retp); + ++/* Return whether there's AMOUNT more bytes after OFFSET in DATA. */ ++int elf_can_read_next(Elf_Data *data, GElf_Xword offset, GElf_Xword amount); ++ + #if __WORDSIZE == 32 + #define PRI_ELF_ADDR PRIx32 + #define GELF_ADDR_CAST(x) (void *)(uint32_t)(x) diff -Nru ltrace-0.7.3/debian/patches/add_elf_each_symbol_7a29f9e7.patch ltrace-0.7.3/debian/patches/add_elf_each_symbol_7a29f9e7.patch --- ltrace-0.7.3/debian/patches/add_elf_each_symbol_7a29f9e7.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/add_elf_each_symbol_7a29f9e7.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,111 @@ +From 7a29f9e7a2bd5849886519eb82e9c043d24c6a40 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Mon, 14 Oct 2013 20:04:09 +0200 +Subject: Add elf_each_symbol + +--- + ltrace-elf.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + ltrace-elf.h | 12 +++++++++++ + 2 files changed, 72 insertions(+) + +Index: b/ltrace-elf.c +=================================================================== +--- a/ltrace-elf.c ++++ b/ltrace-elf.c +@@ -190,6 +190,66 @@ name_p(Elf_Scn *scn, GElf_Shdr *shdr, vo + return strcmp(name, data->name) == 0; + } + ++static struct elf_each_symbol_t ++each_symbol_in(Elf_Data *symtab, const char *strtab, size_t count, ++ unsigned i, ++ enum callback_status (*cb)(GElf_Sym *symbol, ++ const char *name, void *data), ++ void *data) ++{ ++ for (; i < count; ++i) { ++ GElf_Sym sym; ++ if (gelf_getsym(symtab, i, &sym) == NULL) ++ return (struct elf_each_symbol_t){ i, -2 }; ++ ++ switch (cb(&sym, strtab + sym.st_name, data)) { ++ case CBS_FAIL: ++ return (struct elf_each_symbol_t){ i, -1 }; ++ case CBS_STOP: ++ return (struct elf_each_symbol_t){ i + 1, 0 }; ++ case CBS_CONT: ++ break; ++ } ++ } ++ ++ return (struct elf_each_symbol_t){ 0, 0 }; ++} ++ ++/* N.B.: gelf_getsym takes integer argument. Since negative values ++ * are invalid as indices, we can use the extra bit to encode which ++ * symbol table we are looking into. ltrace currently doesn't handle ++ * more than two symbol tables anyway, nor does it handle the xindex ++ * stuff. */ ++struct elf_each_symbol_t ++elf_each_symbol(struct ltelf *lte, unsigned start_after, ++ enum callback_status (*cb)(GElf_Sym *symbol, ++ const char *name, void *data), ++ void *data) ++{ ++ unsigned index = start_after == 0 ? 0 : start_after >> 1; ++ ++ /* Go through static symbol table first. */ ++ if ((start_after & 0x1) == 0) { ++ struct elf_each_symbol_t st ++ = each_symbol_in(lte->symtab, lte->strtab, ++ lte->symtab_count, index, cb, data); ++ ++ /* If the iteration stopped prematurely, bail out. 
*/ ++ if (st.restart != 0) ++ return ((struct elf_each_symbol_t) ++ { st.restart << 1, st.status }); ++ } ++ ++ struct elf_each_symbol_t st ++ = each_symbol_in(lte->dynsym, lte->dynstr, lte->dynsym_count, ++ index, cb, data); ++ if (st.restart != 0) ++ return ((struct elf_each_symbol_t) ++ { st.restart << 1 | 0x1, st.status }); ++ ++ return (struct elf_each_symbol_t){ 0, 0 }; ++} ++ + int + elf_get_section_named(struct ltelf *lte, const char *name, + Elf_Scn **tgt_sec, GElf_Shdr *tgt_shdr) +Index: b/ltrace-elf.h +=================================================================== +--- a/ltrace-elf.h ++++ b/ltrace-elf.h +@@ -26,6 +26,7 @@ + + #include + #include ++#include + #include "sysdep.h" + #include "vect.h" + +@@ -101,6 +102,17 @@ int elf_get_section_type(struct ltelf *l + int elf_get_section_named(struct ltelf *lte, const char *name, + Elf_Scn **tgt_sec, GElf_Shdr *tgt_shdr); + ++/* Iterate through all symbols in LTE. See callback.h for notes on ++ * iteration interfaces. START_AFTER is 0 in initial call. */ ++struct elf_each_symbol_t { ++ unsigned restart; ++ int status; ++} elf_each_symbol(struct ltelf *lte, unsigned start_after, ++ enum callback_status (*cb)(GElf_Sym *symbol, ++ const char *name, ++ void *data), ++ void *data); ++ + /* Read, respectively, 1, 2, 4, or 8 bytes from Elf data at given + * OFFSET, and store it in *RETP. Returns 0 on success or a negative + * value if there's not enough data. */ diff -Nru ltrace-0.7.3/debian/patches/add_elf_read_next_u_439ab5bf.patch ltrace-0.7.3/debian/patches/add_elf_read_next_u_439ab5bf.patch --- ltrace-0.7.3/debian/patches/add_elf_read_next_u_439ab5bf.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/add_elf_read_next_u_439ab5bf.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,58 @@ +From 439ab5bfac8588e52c77e22c96fb397787512d0e Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Tue, 5 Feb 2013 01:50:28 +0100 +Subject: Add elf_read_next_u* + +For stream-like reading of ELF data. +--- + ltrace-elf.c | 18 ++++++++++++++++++ + ltrace-elf.h | 7 +++++++ + 2 files changed, 25 insertions(+) + +Index: b/ltrace-elf.c +=================================================================== +--- a/ltrace-elf.c ++++ b/ltrace-elf.c +@@ -242,6 +242,24 @@ DEF_READER(elf_read_u64, 64) + + #undef DEF_READER + ++#define DEF_READER(NAME, SIZE) \ ++ int \ ++ NAME(Elf_Data *data, GElf_Xword *offset, uint##SIZE##_t *retp) \ ++ { \ ++ int rc = elf_read_u##SIZE(data, *offset, retp); \ ++ if (rc < 0) \ ++ return rc; \ ++ *offset += SIZE / 8; \ ++ return 0; \ ++ } ++ ++DEF_READER(elf_read_next_u8, 8) ++DEF_READER(elf_read_next_u16, 16) ++DEF_READER(elf_read_next_u32, 32) ++DEF_READER(elf_read_next_u64, 64) ++ ++#undef DEF_READER ++ + int + ltelf_init(struct ltelf *lte, const char *filename) + { +Index: b/ltrace-elf.h +=================================================================== +--- a/ltrace-elf.h ++++ b/ltrace-elf.h +@@ -109,6 +109,13 @@ int elf_read_u16(Elf_Data *data, GElf_Xw + int elf_read_u32(Elf_Data *data, GElf_Xword offset, uint32_t *retp); + int elf_read_u64(Elf_Data *data, GElf_Xword offset, uint64_t *retp); + ++/* These are same as above, but update *OFFSET with the width ++ * of read datum. 
*/ ++int elf_read_next_u8(Elf_Data *data, GElf_Xword *offset, uint8_t *retp); ++int elf_read_next_u16(Elf_Data *data, GElf_Xword *offset, uint16_t *retp); ++int elf_read_next_u32(Elf_Data *data, GElf_Xword *offset, uint32_t *retp); ++int elf_read_next_u64(Elf_Data *data, GElf_Xword *offset, uint64_t *retp); ++ + #if __WORDSIZE == 32 + #define PRI_ELF_ADDR PRIx32 + #define GELF_ADDR_CAST(x) (void *)(uint32_t)(x) diff -Nru ltrace-0.7.3/debian/patches/add_elf_read_u8_3c636fb7.patch ltrace-0.7.3/debian/patches/add_elf_read_u8_3c636fb7.patch --- ltrace-0.7.3/debian/patches/add_elf_read_u8_3c636fb7.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/add_elf_read_u8_3c636fb7.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,40 @@ +From 3c636fb789a29cac0c8f7f0982fb17afeee489dc Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Tue, 5 Feb 2013 01:48:54 +0100 +Subject: Add elf_read_u8 + +--- + ltrace-elf.c | 1 + + ltrace-elf.h | 7 ++++--- + 2 files changed, 5 insertions(+), 3 deletions(-) + +Index: b/ltrace-elf.c +=================================================================== +--- a/ltrace-elf.c ++++ b/ltrace-elf.c +@@ -235,6 +235,7 @@ need_data(Elf_Data *data, GElf_Xword off + return 0; \ + } + ++DEF_READER(elf_read_u8, 8) + DEF_READER(elf_read_u16, 16) + DEF_READER(elf_read_u32, 32) + DEF_READER(elf_read_u64, 64) +Index: b/ltrace-elf.h +=================================================================== +--- a/ltrace-elf.h ++++ b/ltrace-elf.h +@@ -101,9 +101,10 @@ int elf_get_section_type(struct ltelf *l + int elf_get_section_named(struct ltelf *lte, const char *name, + Elf_Scn **tgt_sec, GElf_Shdr *tgt_shdr); + +-/* Read, respectively, 2, 4, or 8 bytes from Elf data at given OFFSET, +- * and store it in *RETP. Returns 0 on success or a negative value if +- * there's not enough data. */ ++/* Read, respectively, 1, 2, 4, or 8 bytes from Elf data at given ++ * OFFSET, and store it in *RETP. Returns 0 on success or a negative ++ * value if there's not enough data. */ ++int elf_read_u8(Elf_Data *data, GElf_Xword offset, uint8_t *retp); + int elf_read_u16(Elf_Data *data, GElf_Xword offset, uint16_t *retp); + int elf_read_u32(Elf_Data *data, GElf_Xword offset, uint32_t *retp); + int elf_read_u64(Elf_Data *data, GElf_Xword offset, uint64_t *retp); diff -Nru ltrace-0.7.3/debian/patches/add_irelative_tracing_b420a226.patch ltrace-0.7.3/debian/patches/add_irelative_tracing_b420a226.patch --- ltrace-0.7.3/debian/patches/add_irelative_tracing_b420a226.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/add_irelative_tracing_b420a226.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,229 @@ +From b420a226cd2fc5d6028adcaf236c512a1f1fb437 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Tue, 15 Oct 2013 10:46:28 +0200 +Subject: Add support for tracing of IRELATIVE PLT entries + +- Because the IRELATIVE entries have no associated symbol name, we + need to allow arch_elf_add_plt_entry to override the name. This is + done by that callback returning PLT_OK and returning the new symbol + via libsym-chain return argument. Filtering is postponed until we + have that symbol, and the filter is applied to the whole returned + chain. + +- Add linux_elf_add_plt_entry_irelative to support proper naming of + IRELATIVE PLT entries. This needs to be called from arch backend, + as the numbers of IRELATIVE relocations differ per-architecture. 
+--- + ltrace-elf.c | 43 +++++++++++++++++++++-------- + sysdeps/linux-gnu/trace.c | 68 +++++++++++++++++++++++++++++++++++++++++++--- + sysdeps/linux-gnu/trace.h | 18 ++++++++++++ + 3 files changed, 115 insertions(+), 14 deletions(-) + +Index: b/ltrace-elf.c +=================================================================== +--- a/ltrace-elf.c ++++ b/ltrace-elf.c +@@ -539,6 +539,24 @@ mark_chain_latent(struct library_symbol + } + } + ++static void ++filter_symbol_chain(struct filter *filter, ++ struct library_symbol **libsymp, struct library *lib) ++{ ++ assert(libsymp != NULL); ++ struct library_symbol **ptr = libsymp; ++ while (*ptr != NULL) { ++ if (filter_matches_symbol(filter, (*ptr)->name, lib)) { ++ ptr = &(*ptr)->next; ++ } else { ++ struct library_symbol *sym = *ptr; ++ *ptr = (*ptr)->next; ++ library_symbol_destroy(sym); ++ free(sym); ++ } ++ } ++} ++ + static int + populate_plt(struct Process *proc, const char *filename, + struct ltelf *lte, struct library *lib, +@@ -554,30 +572,34 @@ populate_plt(struct Process *proc, const + + char const *name = lte->dynstr + sym.st_name; + +- /* If the symbol wasn't matched, reject it, unless we +- * need to keep latent PLT breakpoints for tracing +- * exports. */ + int matched = filter_matches_symbol(options.plt_filter, + name, lib); +- if (!matched && !latent_plts) +- continue; +- + struct library_symbol *libsym = NULL; + switch (arch_elf_add_plt_entry(proc, lte, name, + &rela, i, &libsym)) { ++ case plt_fail: ++ return -1; ++ + case plt_default: ++ /* Add default entry to the beginning of LIBSYM. */ + if (default_elf_add_plt_entry(proc, lte, name, + &rela, i, &libsym) < 0) +- /* fall-through */ +- case plt_fail: + return -1; +- /* fall-through */ + case plt_ok: ++ /* If we didn't match the PLT entry up there, ++ * filter the chain to only include the ++ * matching symbols (but include all if we are ++ * adding latent symbols). This is to allow ++ * arch_elf_add_plt_entry to override the PLT ++ * symbol's name. */ ++ if (!matched && !latent_plts) ++ filter_symbol_chain(options.plt_filter, ++ &libsym, lib); + if (libsym != NULL) { + /* If we are adding those symbols just + * for tracing exports, mark them all + * latent. */ +- if (!matched) ++ if (!matched && latent_plts) + mark_chain_latent(libsym); + library_add_symbol(lib, libsym); + } +@@ -657,7 +679,6 @@ populate_this_symtab(struct Process *pro + continue; + } + +- /* XXX support IFUNC as well. 
*/ + if (GELF_ST_TYPE(sym.st_info) != STT_FUNC + || sym.st_value == 0 + || sym.st_shndx == STN_UNDEF) +Index: b/sysdeps/linux-gnu/trace.c +=================================================================== +--- a/sysdeps/linux-gnu/trace.c ++++ b/sysdeps/linux-gnu/trace.c +@@ -24,25 +24,29 @@ + #include "config.h" + + #include +-#include +-#include + #include + #include ++#include ++#include ++#include + #include + #include + #include ++#include ++#include + #include + + #ifdef HAVE_LIBSELINUX + # include + #endif + +-#include "linux-gnu/trace.h" + #include "linux-gnu/trace-defs.h" ++#include "linux-gnu/trace.h" + #include "backend.h" + #include "breakpoint.h" + #include "debug.h" + #include "events.h" ++#include "ltrace-elf.h" + #include "options.h" + #include "proc.h" + #include "ptrace.h" +@@ -1212,3 +1216,61 @@ umovebytes(Process *proc, void *addr, vo + + return bytes_read; + } ++ ++struct irelative_name_data_t { ++ GElf_Addr addr; ++ const char *found_name; ++}; ++ ++static enum callback_status ++irelative_name_cb(GElf_Sym *symbol, const char *name, void *d) ++{ ++ struct irelative_name_data_t *data = d; ++ ++ if (symbol->st_value == data->addr) { ++ bool is_ifunc = false; ++#ifdef STT_GNU_IFUNC ++ is_ifunc = GELF_ST_TYPE(symbol->st_info) == STT_GNU_IFUNC; ++#endif ++ data->found_name = name; ++ ++ /* Keep looking, unless we found the actual IFUNC ++ * symbol. What we matched may have been a symbol ++ * denoting the resolver function, which would have ++ * the same address. */ ++ return CBS_STOP_IF(is_ifunc); ++ } ++ ++ return CBS_CONT; ++} ++ ++enum plt_status ++linux_elf_add_plt_entry_irelative(struct Process *proc, struct ltelf *lte, ++ GElf_Rela *rela, size_t ndx, ++ struct library_symbol **ret) ++ ++{ ++ struct irelative_name_data_t data = { rela->r_addend, NULL }; ++ if (rela->r_addend != 0 ++ && elf_each_symbol(lte, 0, ++ irelative_name_cb, &data).status < 0) ++ return -1; ++ ++ const char *name; ++ if (data.found_name != NULL) { ++ name = data.found_name; ++ } else { ++#define NAME "IREL." ++ /* NAME\0 + 0x + digits. */ ++ char *tmp_name = alloca(sizeof NAME + 2 + 16); ++ sprintf(tmp_name, NAME "%#" PRIx64, ++ (uint64_t)rela->r_addend); ++ name = tmp_name; ++#undef NAME ++ } ++ ++ if (default_elf_add_plt_entry(proc, lte, name, rela, ndx, ret) < 0) ++ return PLT_FAIL; ++ ++ return PLT_OK; ++} +Index: b/sysdeps/linux-gnu/trace.h +=================================================================== +--- a/sysdeps/linux-gnu/trace.h ++++ b/sysdeps/linux-gnu/trace.h +@@ -118,4 +118,22 @@ int process_install_stopping_handler + void linux_ptrace_disable_and_singlestep(struct process_stopping_handler *self); + void linux_ptrace_disable_and_continue(struct process_stopping_handler *self); + ++/* When main binary needs to call an IFUNC function defined in the ++ * binary itself, a PLT entry is set up so that dynamic linker can get ++ * involved and resolve the symbol. But unlike other PLT relocation, ++ * this one can't rely on symbol table being available. So it doesn't ++ * reference the symbol by its name, but by its address, and ++ * correspondingly, has another type. When arch backend wishes to ++ * support these IRELATIVE relocations, it should override ++ * arch_elf_add_plt_entry and dispatch to this function for IRELATIVE ++ * relocations. ++ * ++ * This function behaves as arch_elf_add_plt_entry, except that it ++ * doesn't take name for a parameter, but instead looks up the name in ++ * symbol tables in LTE. 
*/ ++enum plt_status linux_elf_add_plt_entry_irelative(struct Process *proc, ++ struct ltelf *lte, ++ GElf_Rela *rela, size_t ndx, ++ struct library_symbol **ret); ++ + #endif /* _LTRACE_LINUX_TRACE_H_ */ diff -Nru ltrace-0.7.3/debian/patches/Add-missing-include-stdio.h.patch ltrace-0.7.3/debian/patches/Add-missing-include-stdio.h.patch --- ltrace-0.7.3/debian/patches/Add-missing-include-stdio.h.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/Add-missing-include-stdio.h.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,30 @@ +Author: dann frazier +Description: Add missing #include stdio.h + Include stdio.h in files that use fprintf() +Applied-Upstream: http://anonscm.debian.org/gitweb/?p=collab-maint/ltrace.git;a=commitdiff;h=3c4a0de0be06377caf43a5f9e1682b05ef7299f9 +Last-Update: 2014-03-17 + +Index: ltrace/sysdeps/linux-gnu/aarch64/regs.c +=================================================================== +--- ltrace.orig/sysdeps/linux-gnu/aarch64/regs.c 2014-03-12 16:23:25.382866486 -0600 ++++ ltrace/sysdeps/linux-gnu/aarch64/regs.c 2014-03-12 16:26:20.989198876 -0600 +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + + #include "backend.h" + #include "proc.h" +Index: ltrace/sysdeps/linux-gnu/aarch64/trace.c +=================================================================== +--- ltrace.orig/sysdeps/linux-gnu/aarch64/trace.c 2014-03-12 16:23:25.382866486 -0600 ++++ ltrace/sysdeps/linux-gnu/aarch64/trace.c 2014-03-12 16:26:20.989198876 -0600 +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + #include + + #include "backend.h" diff -Nru ltrace-0.7.3/debian/patches/add-missing-stdint.h-include.patch ltrace-0.7.3/debian/patches/add-missing-stdint.h-include.patch --- ltrace-0.7.3/debian/patches/add-missing-stdint.h-include.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/add-missing-stdint.h-include.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,20 @@ +Author: dann frazier +Description: add missing include + aarch64's fetch.c uses the uintptr_t typedef defined by stdint.h without + including it. This doesn't currently cause a build failure because stdint.h + is indirectly included via proc.h. +Applied-Upstream: http://anonscm.debian.org/gitweb/?p=collab-maint/ltrace.git;a=commitdiff;h=e9919d980a5b6fc2417d7c05d46329b442467940 +Last-Update: 2014-03-17 + +Index: ltrace/sysdeps/linux-gnu/aarch64/fetch.c +=================================================================== +--- ltrace.orig/sysdeps/linux-gnu/aarch64/fetch.c 2014-03-12 16:23:25.382866486 -0600 ++++ ltrace/sysdeps/linux-gnu/aarch64/fetch.c 2014-03-12 16:25:38.492638417 -0600 +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + #include "fetch.h" + #include "proc.h" diff -Nru ltrace-0.7.3/debian/patches/arm_attr_decoding_df7d2311.patch ltrace-0.7.3/debian/patches/arm_attr_decoding_df7d2311.patch --- ltrace-0.7.3/debian/patches/arm_attr_decoding_df7d2311.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/arm_attr_decoding_df7d2311.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,294 @@ +From df7d23111899c1e1aa16bad83e0db0a8334b11d9 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Tue, 5 Feb 2013 03:27:04 +0100 +Subject: Implement decoding ARM attribute section + +This to determine whether hardfp is used in the process. 
+--- + sysdeps/linux-gnu/arm/Makefile.am | 2 + sysdeps/linux-gnu/arm/arch.h | 16 ++ + sysdeps/linux-gnu/arm/fetch.c | 224 ++++++++++++++++++++++++++++++++++++++ + 3 files changed, 242 insertions(+) + create mode 100644 sysdeps/linux-gnu/arm/fetch.c + +Index: b/sysdeps/linux-gnu/arm/Makefile.am +=================================================================== +--- a/sysdeps/linux-gnu/arm/Makefile.am ++++ b/sysdeps/linux-gnu/arm/Makefile.am +@@ -21,6 +21,7 @@ noinst_LTLIBRARIES = \ + + ___libcpu_la_SOURCES = \ + breakpoint.c \ ++ fetch.c \ + plt.c \ + regs.c \ + trace.c +@@ -34,3 +35,4 @@ noinst_HEADERS = \ + + MAINTAINERCLEANFILES = \ + Makefile.in ++ +Index: b/sysdeps/linux-gnu/arm/arch.h +=================================================================== +--- a/sysdeps/linux-gnu/arm/arch.h ++++ b/sysdeps/linux-gnu/arm/arch.h +@@ -18,6 +18,9 @@ + * 02110-1301 USA + */ + ++#ifndef LTRACE_ARM_ARCH_H ++#define LTRACE_ARM_ARCH_H ++ + #define ARCH_HAVE_ENABLE_BREAKPOINT 1 + #define ARCH_HAVE_DISABLE_BREAKPOINT 1 + +@@ -31,7 +34,20 @@ + #define LT_ELFCLASS ELFCLASS32 + #define LT_ELF_MACHINE EM_ARM + ++#define ARCH_HAVE_FETCH_ARG + #define ARCH_HAVE_BREAKPOINT_DATA + struct arch_breakpoint_data { + int thumb_mode; + }; ++ ++#define ARCH_HAVE_LTELF_DATA ++struct arch_ltelf_data { ++ /* We have this only for the hooks. */ ++}; ++ ++#define ARCH_HAVE_LIBRARY_DATA ++struct arch_library_data { ++ unsigned int hardfp:1; ++}; ++ ++#endif /* LTRACE_ARM_ARCH_H */ +Index: b/sysdeps/linux-gnu/arm/fetch.c +=================================================================== +--- /dev/null ++++ b/sysdeps/linux-gnu/arm/fetch.c +@@ -0,0 +1,224 @@ ++/* ++ * This file is part of ltrace. ++ * Copyright (C) 2013 Petr Machata, Red Hat Inc. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "backend.h" ++#include "fetch.h" ++#include "library.h" ++#include "ltrace-elf.h" ++#include "proc.h" ++#include "ptrace.h" ++#include "type.h" ++#include "value.h" ++ ++static int ++get_hardfp(uint64_t abi_vfp_args) ++{ ++ if (abi_vfp_args == 2) ++ fprintf(stderr, ++ "Tag_ABI_VFP_args value 2 (tool chain-specific " ++ "conventions) not supported.\n"); ++ return abi_vfp_args == 1; ++} ++ ++int ++arch_elf_init(struct ltelf *lte, struct library *lib) ++{ ++ /* Nothing in this section is strictly critical. It's not ++ * that much of a deal if we fail to guess right whether the ++ * ABI is softfp or hardfp. 
*/ ++ unsigned hardfp = 0; ++ ++ Elf_Scn *scn; ++ Elf_Data *data; ++ GElf_Shdr shdr; ++ if (elf_get_section_type(lte, SHT_ARM_ATTRIBUTES, &scn, &shdr) < 0 ++ || (scn != NULL && (data = elf_loaddata(scn, &shdr)) == NULL)) { ++ fprintf(stderr, ++ "Error when obtaining ARM attribute section: %s\n", ++ elf_errmsg(-1)); ++ goto done; ++ ++ } else if (scn != NULL && data != NULL) { ++ GElf_Xword offset = 0; ++ uint8_t version; ++ if (elf_read_next_u8(data, &offset, &version) < 0) { ++ goto done; ++ } else if (version != 'A') { ++ fprintf(stderr, "Unsupported ARM attribute section " ++ "version %d ('%c').\n", version, version); ++ goto done; ++ } ++ ++ do { ++ const char signature[] = "aeabi"; ++ /* N.B. LEN is including the length field ++ * itself. */ ++ uint32_t sec_len; ++ if (elf_read_u32(data, offset, &sec_len) < 0 ++ || !elf_can_read_next(data, offset, sec_len)) { ++ goto done; ++ } ++ const GElf_Xword next_offset = offset + sec_len; ++ offset += 4; ++ ++ if (sec_len < 4 + sizeof signature ++ || strcmp(signature, data->d_buf + offset) != 0) ++ goto skip; ++ offset += sizeof signature; ++ ++ const GElf_Xword offset0 = offset; ++ uint64_t tag; ++ uint32_t sub_len; ++ if (elf_read_next_uleb128(data, &offset, &tag) < 0 ++ || elf_read_next_u32(data, &offset, &sub_len) < 0 ++ || !elf_can_read_next(data, offset0, sub_len)) ++ goto done; ++ ++ if (tag != 1) ++ /* IHI0045D_ABI_addenda: "section and ++ * symbol attributes are deprecated ++ * [...] consumers are permitted to ++ * ignore them." */ ++ goto skip; ++ ++ while (offset < offset0 + sub_len) { ++ if (elf_read_next_uleb128(data, ++ &offset, &tag) < 0) ++ goto done; ++ ++ switch (tag) { ++ uint64_t v; ++ case 6: /* Tag_CPU_arch */ ++ case 7: /* Tag_CPU_arch_profile */ ++ case 8: /* Tag_ARM_ISA_use */ ++ case 9: /* Tag_THUMB_ISA_use */ ++ case 10: /* Tag_FP_arch */ ++ case 11: /* Tag_WMMX_arch */ ++ case 12: /* Tag_Advanced_SIMD_arch */ ++ case 13: /* Tag_PCS_config */ ++ case 14: /* Tag_ABI_PCS_R9_use */ ++ case 15: /* Tag_ABI_PCS_RW_data */ ++ case 16: /* Tag_ABI_PCS_RO_data */ ++ case 17: /* Tag_ABI_PCS_GOT_use */ ++ case 18: /* Tag_ABI_PCS_wchar_t */ ++ case 19: /* Tag_ABI_FP_rounding */ ++ case 20: /* Tag_ABI_FP_denormal */ ++ case 21: /* Tag_ABI_FP_exceptions */ ++ case 22: /* Tag_ABI_FP_user_exceptions */ ++ case 23: /* Tag_ABI_FP_number_model */ ++ case 24: /* Tag_ABI_align_needed */ ++ case 25: /* Tag_ABI_align_preserved */ ++ case 26: /* Tag_ABI_enum_size */ ++ case 27: /* Tag_ABI_HardFP_use */ ++ case 28: /* Tag_ABI_VFP_args */ ++ case 29: /* Tag_ABI_WMMX_args */ ++ case 30: /* Tag_ABI_optimization_goals */ ++ case 31: /* Tag_ABI_FP_optimization_goals */ ++ case 32: /* Tag_compatibility */ ++ case 34: /* Tag_CPU_unaligned_access */ ++ case 36: /* Tag_FP_HP_extension */ ++ case 38: /* Tag_ABI_FP_16bit_format */ ++ case 42: /* Tag_MPextension_use */ ++ case 70: /* Tag_MPextension_use as well */ ++ case 44: /* Tag_DIV_use */ ++ case 64: /* Tag_nodefaults */ ++ case 66: /* Tag_T2EE_use */ ++ case 68: /* Tag_Virtualization_use */ ++ uleb128: ++ if (elf_read_next_uleb128 ++ (data, &offset, &v) < 0) ++ goto done; ++ if (tag == 28) ++ hardfp = get_hardfp(v); ++ if (tag != 32) ++ continue; ++ ++ /* Tag 32 has two arguments, ++ * fall through. */ ++ ++ case 4: /* Tag_CPU_raw_name */ ++ case 5: /* Tag_CPU_name */ ++ case 65: /* Tag_also_compatible_with */ ++ case 67: /* Tag_conformance */ ++ ntbs: ++ offset += strlen(data->d_buf ++ + offset) + 1; ++ continue; ++ } ++ ++ /* Handle unknown tags in a generic ++ * manner, if possible. 
*/ ++ if (tag <= 32) { ++ fprintf(stderr, ++ "Unknown tag %lld " ++ "at offset %#llx " ++ "of ARM attribute section.", ++ tag, offset); ++ goto skip; ++ } else if (tag % 2 == 0) { ++ goto uleb128; ++ } else { ++ goto ntbs; ++ } ++ } ++ ++ skip: ++ offset = next_offset; ++ ++ } while (elf_can_read_next(data, offset, 1)); ++ ++ } ++ ++done: ++ lib->arch.hardfp = hardfp; ++ return 0; ++} ++ ++void ++arch_elf_destroy(struct ltelf *lte) ++{ ++} ++ ++void ++arch_library_init(struct library *lib) ++{ ++} ++ ++void ++arch_library_destroy(struct library *lib) ++{ ++} ++ ++void ++arch_library_clone(struct library *retp, struct library *lib) ++{ ++ retp->arch = lib->arch; ++} diff -Nru ltrace-0.7.3/debian/patches/arm_backend_fixes_1383e5bd.patch ltrace-0.7.3/debian/patches/arm_backend_fixes_1383e5bd.patch --- ltrace-0.7.3/debian/patches/arm_backend_fixes_1383e5bd.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/arm_backend_fixes_1383e5bd.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,434 @@ +From 1383e5bd876baa123b39210c2210d256bcfa5bec Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Mon, 4 Nov 2013 22:48:38 -0500 +Subject: In ARM backend, move hooks for ltrace_elf and library to plt.c + +- That's the customary location for backend hooks. +--- + sysdeps/linux-gnu/arm/fetch.c | 186 ---------------------------------------- + sysdeps/linux-gnu/arm/plt.c | 194 +++++++++++++++++++++++++++++++++++++++++- + 2 files changed, 192 insertions(+), 188 deletions(-) + +Index: b/sysdeps/linux-gnu/arm/fetch.c +=================================================================== +--- a/sysdeps/linux-gnu/arm/fetch.c ++++ b/sysdeps/linux-gnu/arm/fetch.c +@@ -32,197 +32,11 @@ + #include "backend.h" + #include "fetch.h" + #include "library.h" +-#include "ltrace-elf.h" + #include "proc.h" + #include "ptrace.h" + #include "type.h" + #include "value.h" + +-static int +-get_hardfp(uint64_t abi_vfp_args) +-{ +- if (abi_vfp_args == 2) +- fprintf(stderr, +- "Tag_ABI_VFP_args value 2 (tool chain-specific " +- "conventions) not supported.\n"); +- return abi_vfp_args == 1; +-} +- +-int +-arch_elf_init(struct ltelf *lte, struct library *lib) +-{ +- /* Nothing in this section is strictly critical. It's not +- * that much of a deal if we fail to guess right whether the +- * ABI is softfp or hardfp. */ +- unsigned hardfp = 0; +- +- Elf_Scn *scn; +- Elf_Data *data; +- GElf_Shdr shdr; +- if (elf_get_section_type(lte, SHT_ARM_ATTRIBUTES, &scn, &shdr) < 0 +- || (scn != NULL && (data = elf_loaddata(scn, &shdr)) == NULL)) { +- fprintf(stderr, +- "Error when obtaining ARM attribute section: %s\n", +- elf_errmsg(-1)); +- goto done; +- +- } else if (scn != NULL && data != NULL) { +- GElf_Xword offset = 0; +- uint8_t version; +- if (elf_read_next_u8(data, &offset, &version) < 0) { +- goto done; +- } else if (version != 'A') { +- fprintf(stderr, "Unsupported ARM attribute section " +- "version %d ('%c').\n", version, version); +- goto done; +- } +- +- do { +- const char signature[] = "aeabi"; +- /* N.B. LEN is including the length field +- * itself. 
*/ +- uint32_t sec_len; +- if (elf_read_u32(data, offset, &sec_len) < 0 +- || !elf_can_read_next(data, offset, sec_len)) { +- goto done; +- } +- const GElf_Xword next_offset = offset + sec_len; +- offset += 4; +- +- if (sec_len < 4 + sizeof signature +- || strcmp(signature, data->d_buf + offset) != 0) +- goto skip; +- offset += sizeof signature; +- +- const GElf_Xword offset0 = offset; +- uint64_t tag; +- uint32_t sub_len; +- if (elf_read_next_uleb128(data, &offset, &tag) < 0 +- || elf_read_next_u32(data, &offset, &sub_len) < 0 +- || !elf_can_read_next(data, offset0, sub_len)) +- goto done; +- +- if (tag != 1) +- /* IHI0045D_ABI_addenda: "section and +- * symbol attributes are deprecated +- * [...] consumers are permitted to +- * ignore them." */ +- goto skip; +- +- while (offset < offset0 + sub_len) { +- if (elf_read_next_uleb128(data, +- &offset, &tag) < 0) +- goto done; +- +- switch (tag) { +- uint64_t v; +- case 6: /* Tag_CPU_arch */ +- case 7: /* Tag_CPU_arch_profile */ +- case 8: /* Tag_ARM_ISA_use */ +- case 9: /* Tag_THUMB_ISA_use */ +- case 10: /* Tag_FP_arch */ +- case 11: /* Tag_WMMX_arch */ +- case 12: /* Tag_Advanced_SIMD_arch */ +- case 13: /* Tag_PCS_config */ +- case 14: /* Tag_ABI_PCS_R9_use */ +- case 15: /* Tag_ABI_PCS_RW_data */ +- case 16: /* Tag_ABI_PCS_RO_data */ +- case 17: /* Tag_ABI_PCS_GOT_use */ +- case 18: /* Tag_ABI_PCS_wchar_t */ +- case 19: /* Tag_ABI_FP_rounding */ +- case 20: /* Tag_ABI_FP_denormal */ +- case 21: /* Tag_ABI_FP_exceptions */ +- case 22: /* Tag_ABI_FP_user_exceptions */ +- case 23: /* Tag_ABI_FP_number_model */ +- case 24: /* Tag_ABI_align_needed */ +- case 25: /* Tag_ABI_align_preserved */ +- case 26: /* Tag_ABI_enum_size */ +- case 27: /* Tag_ABI_HardFP_use */ +- case 28: /* Tag_ABI_VFP_args */ +- case 29: /* Tag_ABI_WMMX_args */ +- case 30: /* Tag_ABI_optimization_goals */ +- case 31: /* Tag_ABI_FP_optimization_goals */ +- case 32: /* Tag_compatibility */ +- case 34: /* Tag_CPU_unaligned_access */ +- case 36: /* Tag_FP_HP_extension */ +- case 38: /* Tag_ABI_FP_16bit_format */ +- case 42: /* Tag_MPextension_use */ +- case 70: /* Tag_MPextension_use as well */ +- case 44: /* Tag_DIV_use */ +- case 64: /* Tag_nodefaults */ +- case 66: /* Tag_T2EE_use */ +- case 68: /* Tag_Virtualization_use */ +- uleb128: +- if (elf_read_next_uleb128 +- (data, &offset, &v) < 0) +- goto done; +- if (tag == 28) +- hardfp = get_hardfp(v); +- if (tag != 32) +- continue; +- +- /* Tag 32 has two arguments, +- * fall through. */ +- +- case 4: /* Tag_CPU_raw_name */ +- case 5: /* Tag_CPU_name */ +- case 65: /* Tag_also_compatible_with */ +- case 67: /* Tag_conformance */ +- ntbs: +- offset += strlen(data->d_buf +- + offset) + 1; +- continue; +- } +- +- /* Handle unknown tags in a generic +- * manner, if possible. */ +- if (tag <= 32) { +- fprintf(stderr, +- "Unknown tag %lld " +- "at offset %#llx " +- "of ARM attribute section.", +- tag, offset); +- goto skip; +- } else if (tag % 2 == 0) { +- goto uleb128; +- } else { +- goto ntbs; +- } +- } +- +- skip: +- offset = next_offset; +- +- } while (elf_can_read_next(data, offset, 1)); +- +- } +- +-done: +- lib->arch.hardfp = hardfp; +- return 0; +-} +- +-void +-arch_elf_destroy(struct ltelf *lte) +-{ +-} +- +-void +-arch_library_init(struct library *lib) +-{ +-} +- +-void +-arch_library_destroy(struct library *lib) +-{ +-} +- +-void +-arch_library_clone(struct library *retp, struct library *lib) +-{ +- retp->arch = lib->arch; +-} +- + enum { + /* How many (double) VFP registers the AAPCS uses for + * parameter passing. 
*/ +Index: b/sysdeps/linux-gnu/arm/plt.c +=================================================================== +--- a/sysdeps/linux-gnu/arm/plt.c ++++ b/sysdeps/linux-gnu/arm/plt.c +@@ -20,20 +20,192 @@ + */ + + #include ++#include ++#include + + #include "proc.h" + #include "library.h" + #include "ltrace-elf.h" + + static int ++get_hardfp(uint64_t abi_vfp_args) ++{ ++ if (abi_vfp_args == 2) ++ fprintf(stderr, ++ "Tag_ABI_VFP_args value 2 (tool chain-specific " ++ "conventions) not supported.\n"); ++ return abi_vfp_args == 1; ++} ++ ++int ++arch_elf_init(struct ltelf *lte, struct library *lib) ++{ ++ /* Nothing in this section is strictly critical. It's not ++ * that much of a deal if we fail to guess right whether the ++ * ABI is softfp or hardfp. */ ++ unsigned hardfp = 0; ++ ++ Elf_Scn *scn; ++ Elf_Data *data; ++ GElf_Shdr shdr; ++ if (elf_get_section_type(lte, SHT_ARM_ATTRIBUTES, &scn, &shdr) < 0 ++ || (scn != NULL && (data = elf_loaddata(scn, &shdr)) == NULL)) { ++ fprintf(stderr, ++ "Error when obtaining ARM attribute section: %s\n", ++ elf_errmsg(-1)); ++ goto done; ++ ++ } else if (scn != NULL && data != NULL) { ++ GElf_Xword offset = 0; ++ uint8_t version; ++ if (elf_read_next_u8(data, &offset, &version) < 0) { ++ goto done; ++ } else if (version != 'A') { ++ fprintf(stderr, "Unsupported ARM attribute section " ++ "version %d ('%c').\n", version, version); ++ goto done; ++ } ++ ++ do { ++ const char signature[] = "aeabi"; ++ /* N.B. LEN is including the length field ++ * itself. */ ++ uint32_t sec_len; ++ if (elf_read_u32(data, offset, &sec_len) < 0 ++ || !elf_can_read_next(data, offset, sec_len)) { ++ goto done; ++ } ++ const GElf_Xword next_offset = offset + sec_len; ++ offset += 4; ++ ++ if (sec_len < 4 + sizeof signature ++ || strcmp(signature, data->d_buf + offset) != 0) ++ goto skip; ++ offset += sizeof signature; ++ ++ const GElf_Xword offset0 = offset; ++ uint64_t tag; ++ uint32_t sub_len; ++ if (elf_read_next_uleb128(data, &offset, &tag) < 0 ++ || elf_read_next_u32(data, &offset, &sub_len) < 0 ++ || !elf_can_read_next(data, offset0, sub_len)) ++ goto done; ++ ++ if (tag != 1) ++ /* IHI0045D_ABI_addenda: "section and ++ * symbol attributes are deprecated ++ * [...] consumers are permitted to ++ * ignore them." 
*/ ++ goto skip; ++ ++ while (offset < offset0 + sub_len) { ++ if (elf_read_next_uleb128(data, ++ &offset, &tag) < 0) ++ goto done; ++ ++ switch (tag) { ++ uint64_t v; ++ case 6: /* Tag_CPU_arch */ ++ case 7: /* Tag_CPU_arch_profile */ ++ case 8: /* Tag_ARM_ISA_use */ ++ case 9: /* Tag_THUMB_ISA_use */ ++ case 10: /* Tag_FP_arch */ ++ case 11: /* Tag_WMMX_arch */ ++ case 12: /* Tag_Advanced_SIMD_arch */ ++ case 13: /* Tag_PCS_config */ ++ case 14: /* Tag_ABI_PCS_R9_use */ ++ case 15: /* Tag_ABI_PCS_RW_data */ ++ case 16: /* Tag_ABI_PCS_RO_data */ ++ case 17: /* Tag_ABI_PCS_GOT_use */ ++ case 18: /* Tag_ABI_PCS_wchar_t */ ++ case 19: /* Tag_ABI_FP_rounding */ ++ case 20: /* Tag_ABI_FP_denormal */ ++ case 21: /* Tag_ABI_FP_exceptions */ ++ case 22: /* Tag_ABI_FP_user_exceptions */ ++ case 23: /* Tag_ABI_FP_number_model */ ++ case 24: /* Tag_ABI_align_needed */ ++ case 25: /* Tag_ABI_align_preserved */ ++ case 26: /* Tag_ABI_enum_size */ ++ case 27: /* Tag_ABI_HardFP_use */ ++ case 28: /* Tag_ABI_VFP_args */ ++ case 29: /* Tag_ABI_WMMX_args */ ++ case 30: /* Tag_ABI_optimization_goals */ ++ case 31: /* Tag_ABI_FP_optimization_goals */ ++ case 32: /* Tag_compatibility */ ++ case 34: /* Tag_CPU_unaligned_access */ ++ case 36: /* Tag_FP_HP_extension */ ++ case 38: /* Tag_ABI_FP_16bit_format */ ++ case 42: /* Tag_MPextension_use */ ++ case 70: /* Tag_MPextension_use as well */ ++ case 44: /* Tag_DIV_use */ ++ case 64: /* Tag_nodefaults */ ++ case 66: /* Tag_T2EE_use */ ++ case 68: /* Tag_Virtualization_use */ ++ uleb128: ++ if (elf_read_next_uleb128 ++ (data, &offset, &v) < 0) ++ goto done; ++ if (tag == 28) ++ hardfp = get_hardfp(v); ++ if (tag != 32) ++ continue; ++ ++ /* Tag 32 has two arguments, ++ * fall through. */ ++ ++ case 4: /* Tag_CPU_raw_name */ ++ case 5: /* Tag_CPU_name */ ++ case 65: /* Tag_also_compatible_with */ ++ case 67: /* Tag_conformance */ ++ ntbs: ++ offset += strlen(data->d_buf ++ + offset) + 1; ++ continue; ++ } ++ ++ /* Handle unknown tags in a generic ++ * manner, if possible. */ ++ if (tag <= 32) { ++ fprintf(stderr, ++ "Unknown tag %lld " ++ "at offset %#llx " ++ "of ARM attribute section.", ++ tag, offset); ++ goto skip; ++ } else if (tag % 2 == 0) { ++ goto uleb128; ++ } else { ++ goto ntbs; ++ } ++ } ++ ++ skip: ++ offset = next_offset; ++ ++ } while (elf_can_read_next(data, offset, 1)); ++ ++ } ++ ++done: ++ lib->arch.hardfp = hardfp; ++ return 0; ++} ++ ++void ++arch_elf_destroy(struct ltelf *lte) ++{ ++} ++ ++static int + arch_plt_entry_has_stub(struct ltelf *lte, size_t off) { +- uint16_t op = *(uint16_t *)((char *)lte->relplt->d_buf + off); ++ char *buf = (char *) lte->arch.jmprel_data->d_buf; ++ uint16_t op = *(uint16_t *) (buf + off); + return op == 0x4778; + } + + GElf_Addr + arch_plt_sym_val(struct ltelf *lte, size_t ndx, GElf_Rela * rela) { +- size_t start = lte->relplt->d_size + 12; ++ size_t start = lte->arch.jmprel_data->d_size + 12; + size_t off = start + 20, i; + for (i = 0; i < ndx; i++) + off += arch_plt_entry_has_stub(lte, off) ? 
16 : 12; +@@ -46,3 +218,21 @@ void * + sym2addr(Process *proc, struct library_symbol *sym) { + return sym->enter_addr; + } ++ ++int ++arch_library_init(struct library *lib) ++{ ++ return 0; ++} ++ ++void ++arch_library_destroy(struct library *lib) ++{ ++} ++ ++int ++arch_library_clone(struct library *retp, struct library *lib) ++{ ++ retp->arch = lib->arch; ++ return 0; ++} diff -Nru ltrace-0.7.3/debian/patches/arm_bpc_62fc7747.patch ltrace-0.7.3/debian/patches/arm_bpc_62fc7747.patch --- ltrace-0.7.3/debian/patches/arm_bpc_62fc7747.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/arm_bpc_62fc7747.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,223 @@ +From 62fc7747d37cacfb21381961674c07ebab5f2fb9 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Tue, 5 Feb 2013 15:42:06 +0100 +Subject: Implement Base Procedure Call Standard for ARM + +arch_fetch_arg_next still doesn't support "hardfp" extensions, but much of +the test suite is now clean, with only 16 failures left. +--- + sysdeps/linux-gnu/arm/fetch.c | 112 +++++++++++++++++++++++++++++++++-- + testsuite/ltrace.main/parameters.exp | 5 - + 2 files changed, 106 insertions(+), 11 deletions(-) + +Index: b/sysdeps/linux-gnu/arm/fetch.c +=================================================================== +--- a/sysdeps/linux-gnu/arm/fetch.c ++++ b/sysdeps/linux-gnu/arm/fetch.c +@@ -232,17 +232,27 @@ struct fetch_context { + }; + uint32_t fpscr; + } fpregs; ++ unsigned ncrn; ++ arch_addr_t sp; ++ arch_addr_t nsaa; ++ arch_addr_t ret_struct; + bool hardfp:1; + }; + + static int + fetch_register_banks(struct Process *proc, struct fetch_context *context) + { ++ if (ptrace(PTRACE_GETREGS, proc->pid, NULL, &context->regs) == -1) ++ return -1; ++ + if (context->hardfp + && ptrace(PTRACE_GETVFPREGS, proc->pid, + NULL, &context->fpregs) == -1) + return -1; + ++ context->ncrn = 0; ++ context->nsaa = context->sp = get_stack_pointer(proc); ++ + return 0; + } + +@@ -251,13 +261,32 @@ arch_fetch_arg_init(enum tof type, struc + struct arg_type_info *ret_info) + { + struct fetch_context *context = malloc(sizeof(*context)); +- context->hardfp = proc->libraries->arch.hardfp; ++ ++ { ++ struct Process *mainp = proc; ++ while (mainp->libraries == NULL && mainp->parent != NULL) ++ mainp = mainp->parent; ++ context->hardfp = mainp->libraries->arch.hardfp; ++ } ++ + if (context == NULL + || fetch_register_banks(proc, context) < 0) { + free(context); + return NULL; + } + ++ if (ret_info->type == ARGTYPE_STRUCT ++ || ret_info->type == ARGTYPE_ARRAY) { ++ size_t sz = type_sizeof(proc, ret_info); ++ assert(sz != (size_t)-1); ++ if (sz > 4) { ++ /* XXX double cast */ ++ context->ret_struct ++ = (arch_addr_t)context->regs.uregs[0]; ++ context->ncrn++; ++ } ++ } ++ + return context; + } + +@@ -277,6 +306,54 @@ arch_fetch_arg_next(struct fetch_context + struct Process *proc, + struct arg_type_info *info, struct value *valuep) + { ++ const size_t sz = type_sizeof(proc, info); ++ assert(sz != (size_t)-1); ++ ++ /* IHI0042E_aapcs: If the argument requires double-word ++ * alignment (8-byte), the NCRN is rounded up to the next even ++ * register number. */ ++ const size_t al = type_alignof(proc, info); ++ assert(al != (size_t)-1); ++ if (al == 8) ++ ctx->ncrn = ((ctx->ncrn + 1) / 2) * 2; ++ ++ /* If the size in words of the argument is not more than r4 ++ * minus NCRN, the argument is copied into core registers, ++ * starting at the NCRN. 
*/ ++ /* If the NCRN is less than r4 and the NSAA is equal to the ++ * SP, the argument is split between core registers and the ++ * stack. */ ++ ++ const size_t words = (sz + 3) / 4; ++ if (ctx->ncrn < 4 && ctx->nsaa == ctx->sp) { ++ unsigned char *data = value_reserve(valuep, words * 4); ++ if (data == NULL) ++ return -1; ++ size_t i; ++ for (i = 0; i < words && ctx->ncrn < 4; ++i) { ++ memcpy(data, &ctx->regs.uregs[ctx->ncrn++], 4); ++ data += 4; ++ } ++ const size_t rest = (words - i) * 4; ++ if (rest > 0) { ++ umovebytes(proc, ctx->nsaa, data, rest); ++ ctx->nsaa += rest; ++ } ++ return 0; ++ } ++ ++ assert(ctx->ncrn == 4); ++ ++ /* If the argument required double-word alignment (8-byte), ++ * then the NSAA is rounded up to the next double-word ++ * address. */ ++ if (al == 8) ++ /* XXX double cast. */ ++ ctx->nsaa = (arch_addr_t)((((uintptr_t)ctx->nsaa + 7) / 8) * 8); ++ ++ value_in_inferior(valuep, ctx->nsaa); ++ ctx->nsaa += sz; ++ + return 0; + } + +@@ -288,21 +365,39 @@ arch_fetch_retval(struct fetch_context * + if (fetch_register_banks(proc, ctx) < 0) + return -1; + ++ size_t sz = type_sizeof(proc, info); ++ assert(sz != (size_t)-1); ++ + switch (info->type) { ++ unsigned char *data; ++ union { ++ struct { ++ uint32_t r0; ++ uint32_t r1; ++ } s; ++ unsigned char buf[8]; ++ } u; ++ + case ARGTYPE_VOID: + return 0; + + case ARGTYPE_FLOAT: + case ARGTYPE_DOUBLE: + if (ctx->hardfp) { +- size_t sz = type_sizeof(proc, info); +- assert(sz != (size_t)-1); + unsigned char *data = value_reserve(valuep, sz); + if (data == NULL) + return -1; + memmove(data, &ctx->fpregs, sz); + return 0; + } ++ goto pass_in_registers; ++ ++ case ARGTYPE_ARRAY: ++ case ARGTYPE_STRUCT: ++ if (sz > 4) { ++ value_in_inferior(valuep, ctx->ret_struct); ++ return 0; ++ } + /* Fall through. 
*/ + + case ARGTYPE_CHAR: +@@ -313,9 +408,14 @@ arch_fetch_retval(struct fetch_context * + case ARGTYPE_LONG: + case ARGTYPE_ULONG: + case ARGTYPE_POINTER: +- case ARGTYPE_ARRAY: +- case ARGTYPE_STRUCT: +- return -1; ++ pass_in_registers: ++ if (arm_get_register(proc, ARM_REG_R3, &u.s.r0) < 0 ++ || (sz > 4 && arm_get_register(proc, ARM_REG_R1, ++ &u.s.r1) < 0) ++ || (data = value_reserve(valuep, sz)) == NULL) ++ return -1; ++ memmove(data, u.buf, sz); ++ return 0; + } + assert(info->type != info->type); + abort(); +Index: b/testsuite/ltrace.main/parameters.exp +=================================================================== +--- a/testsuite/ltrace.main/parameters.exp ++++ b/testsuite/ltrace.main/parameters.exp +@@ -35,9 +35,6 @@ if [regexp {ELF from incompatible archit + return + } + +-set xfail_spec {"arm*-*" } +-set xfail_spec_arm {"arm*-*"} +- + # Verify the output + set pattern "func_intptr(17)" + ltrace_verify_output ${objdir}/${subdir}/${testfile}.ltrace $pattern 1 +@@ -63,7 +60,6 @@ set pattern "func_ushort(33, 34)" + ltrace_verify_output ${objdir}/${subdir}/${testfile}.ltrace $pattern 1 + set pattern "func_float(3.40*, -3.40*).*= 3.40*" + ltrace_verify_output ${objdir}/${subdir}/${testfile}.ltrace $pattern 1 +-eval "setup_xfail $xfail_spec" + set pattern "func_double(3.40*, -3.40*).*= -3.40*" + ltrace_verify_output ${objdir}/${subdir}/${testfile}.ltrace $pattern 1 + set pattern "func_typedef(BLUE)" +@@ -86,7 +82,6 @@ set pattern "func_work(\\\"x\\\")" + ltrace_verify_output ${objdir}/${subdir}/${testfile}.ltrace $pattern 1 + set pattern "func_struct_2(17, { \\\"ABCDE\\\\\\\\0\\\", 0.250* }, 0.50*).*= { 0.250*, 'B', 'C' }" + ltrace_verify_output ${objdir}/${subdir}/${testfile}.ltrace $pattern 1 +-eval "setup_xfail $xfail_spec_arm" + set pattern "<... func_call resumed> \\\"x\\\", \\\"y\\\")" + ltrace_verify_output ${objdir}/${subdir}/${testfile}.ltrace $pattern 1 + diff -Nru ltrace-0.7.3/debian/patches/arm_fetch_backend_97a25160.patch ltrace-0.7.3/debian/patches/arm_fetch_backend_97a25160.patch --- ltrace-0.7.3/debian/patches/arm_fetch_backend_97a25160.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/arm_fetch_backend_97a25160.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,174 @@ +From 97a25160b0fe646d9c567e12c5abefe8e59873c2 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Tue, 5 Feb 2013 03:30:11 +0100 +Subject: Add ARM fetch backend + +This only properly decodes float and double return values. More to come. 
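
The NCRN/NSAA bookkeeping that the arm_bpc patch above adds to the fetch backend is easier to follow in isolation. The stand-alone C sketch below mirrors the register/stack split it implements (double-word-aligned arguments round the Next Core Register Number up to an even register, r0 through r3 hold at most four words, and whatever does not fit spills to the stack at the Next Stacked Argument Address). The helper and struct names are invented for the illustration, the NSAA == SP check and the actual register reads are dropped, and the argument sizes in main() are made up, so this shows the calling convention rather than reproducing the patch code:

#include <stdio.h>
#include <stddef.h>

struct aapcs_state {
	unsigned ncrn;	/* Next Core Register Number, 0..4 */
	unsigned nsaa;	/* Next Stacked Argument Address */
};

/* Returns how many words of the argument land in r0-r3; advances the state. */
static unsigned
allocate_arg(struct aapcs_state *st, size_t size, size_t align)
{
	if (align == 8)			/* double-word aligned: even NCRN */
		st->ncrn = (st->ncrn + 1) & ~1u;

	size_t words = (size + 3) / 4;
	unsigned in_regs = 0;
	while (words > 0 && st->ncrn < 4) {	/* r0..r3 */
		st->ncrn++;
		in_regs++;
		words--;
	}
	if (in_regs == 0 && align == 8)		/* fully on stack: round NSAA too */
		st->nsaa = (st->nsaa + 7) & ~7u;
	st->nsaa += words * 4;			/* remainder goes on the stack */
	return in_regs;
}

int
main(void)
{
	struct aapcs_state st = { 0, 0x1000 };	/* hypothetical stack pointer */
	printf("int:         %u words in registers\n", allocate_arg(&st, 4, 4));
	printf("double:      %u words in registers\n", allocate_arg(&st, 8, 8));
	printf("16-byte struct: %u words in registers, NSAA now %#x\n",
	       allocate_arg(&st, 16, 4), st.nsaa);
	return 0;
}

With the sizes used above, the int takes r0, the double is rounded up to r2/r3, and the 16-byte struct finds no core registers left and is placed entirely at the NSAA, which is exactly the behaviour arch_fetch_arg_next models for the traced process.
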
+--- + sysdeps/linux-gnu/arm/fetch.c | 104 ++++++++++++++++++++++++++++++++++++++++++ + sysdeps/linux-gnu/arm/trace.c | 43 ----------------- + 2 files changed, 104 insertions(+), 43 deletions(-) + +Index: b/sysdeps/linux-gnu/arm/fetch.c +=================================================================== +--- a/sysdeps/linux-gnu/arm/fetch.c ++++ b/sysdeps/linux-gnu/arm/fetch.c +@@ -222,3 +222,107 @@ arch_library_clone(struct library *retp, + { + retp->arch = lib->arch; + } ++ ++struct fetch_context { ++ struct pt_regs regs; ++ struct { ++ union { ++ double d[32]; ++ float s[64]; ++ }; ++ uint32_t fpscr; ++ } fpregs; ++ bool hardfp:1; ++}; ++ ++static int ++fetch_register_banks(struct Process *proc, struct fetch_context *context) ++{ ++ if (context->hardfp ++ && ptrace(PTRACE_GETVFPREGS, proc->pid, ++ NULL, &context->fpregs) == -1) ++ return -1; ++ ++ return 0; ++} ++ ++struct fetch_context * ++arch_fetch_arg_init(enum tof type, struct Process *proc, ++ struct arg_type_info *ret_info) ++{ ++ struct fetch_context *context = malloc(sizeof(*context)); ++ context->hardfp = proc->libraries->arch.hardfp; ++ if (context == NULL ++ || fetch_register_banks(proc, context) < 0) { ++ free(context); ++ return NULL; ++ } ++ ++ return context; ++} ++ ++struct fetch_context * ++arch_fetch_arg_clone(struct Process *proc, ++ struct fetch_context *context) ++{ ++ struct fetch_context *clone = malloc(sizeof(*context)); ++ if (clone == NULL) ++ return NULL; ++ *clone = *context; ++ return clone; ++} ++ ++int ++arch_fetch_arg_next(struct fetch_context *ctx, enum tof type, ++ struct Process *proc, ++ struct arg_type_info *info, struct value *valuep) ++{ ++ return 0; ++} ++ ++int ++arch_fetch_retval(struct fetch_context *ctx, enum tof type, ++ struct Process *proc, struct arg_type_info *info, ++ struct value *valuep) ++{ ++ if (fetch_register_banks(proc, ctx) < 0) ++ return -1; ++ ++ switch (info->type) { ++ case ARGTYPE_VOID: ++ return 0; ++ ++ case ARGTYPE_FLOAT: ++ case ARGTYPE_DOUBLE: ++ if (ctx->hardfp) { ++ size_t sz = type_sizeof(proc, info); ++ assert(sz != (size_t)-1); ++ unsigned char *data = value_reserve(valuep, sz); ++ if (data == NULL) ++ return -1; ++ memmove(data, &ctx->fpregs, sz); ++ return 0; ++ } ++ /* Fall through. 
*/ ++ ++ case ARGTYPE_CHAR: ++ case ARGTYPE_SHORT: ++ case ARGTYPE_USHORT: ++ case ARGTYPE_INT: ++ case ARGTYPE_UINT: ++ case ARGTYPE_LONG: ++ case ARGTYPE_ULONG: ++ case ARGTYPE_POINTER: ++ case ARGTYPE_ARRAY: ++ case ARGTYPE_STRUCT: ++ return -1; ++ } ++ assert(info->type != info->type); ++ abort(); ++} ++ ++void ++arch_fetch_arg_done(struct fetch_context *context) ++{ ++ free(context); ++} +Index: b/sysdeps/linux-gnu/arm/trace.c +=================================================================== +--- a/sysdeps/linux-gnu/arm/trace.c ++++ b/sysdeps/linux-gnu/arm/trace.c +@@ -103,46 +103,3 @@ syscall_p(Process *proc, int status, int + return 0; + } + +-long +-gimme_arg(enum tof type, Process *proc, int arg_num, struct arg_type_info *info) +-{ +- proc_archdep *a = (proc_archdep *) proc->arch_ptr; +- +- if (arg_num == -1) { /* return value */ +- return ptrace(PTRACE_PEEKUSER, proc->pid, off_r0, 0); +- } +- +- /* deal with the ARM calling conventions */ +- if (type == LT_TOF_FUNCTION || type == LT_TOF_FUNCTIONR) { +- if (arg_num < 4) { +- if (a->valid && type == LT_TOF_FUNCTION) +- return a->regs.uregs[arg_num]; +- if (a->valid && type == LT_TOF_FUNCTIONR) +- return a->func_arg[arg_num]; +- return ptrace(PTRACE_PEEKUSER, proc->pid, +- (void *)(4 * arg_num), 0); +- } else { +- return ptrace(PTRACE_PEEKDATA, proc->pid, +- proc->stack_pointer + 4 * (arg_num - 4), +- 0); +- } +- } else if (type == LT_TOF_SYSCALL || type == LT_TOF_SYSCALLR) { +- if (arg_num < 5) { +- if (a->valid && type == LT_TOF_SYSCALL) +- return a->regs.uregs[arg_num]; +- if (a->valid && type == LT_TOF_SYSCALLR) +- return a->sysc_arg[arg_num]; +- return ptrace(PTRACE_PEEKUSER, proc->pid, +- (void *)(4 * arg_num), 0); +- } else { +- return ptrace(PTRACE_PEEKDATA, proc->pid, +- proc->stack_pointer + 4 * (arg_num - 5), +- 0); +- } +- } else { +- fprintf(stderr, "gimme_arg called with wrong arguments\n"); +- exit(1); +- } +- +- return 0; +-} diff -Nru ltrace-0.7.3/debian/patches/arm_plt_rel_9e33f5ac.patch ltrace-0.7.3/debian/patches/arm_plt_rel_9e33f5ac.patch --- ltrace-0.7.3/debian/patches/arm_plt_rel_9e33f5ac.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/arm_plt_rel_9e33f5ac.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,65 @@ +From 9e33f5ac1037adeb32e9d693e6555967e9be68a6 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Mon, 4 Nov 2013 22:50:11 -0500 +Subject: Fix compilation on ARM + +- This was broken several commits back by removing pltrel from + struct ltelf. +--- + sysdeps/linux-gnu/arm/arch.h | 4 +++- + sysdeps/linux-gnu/arm/plt.c | 14 ++++++++++++++ + 2 files changed, 17 insertions(+), 1 deletion(-) + +Index: b/sysdeps/linux-gnu/arm/arch.h +=================================================================== +--- a/sysdeps/linux-gnu/arm/arch.h ++++ b/sysdeps/linux-gnu/arm/arch.h +@@ -21,6 +21,8 @@ + #ifndef LTRACE_ARM_ARCH_H + #define LTRACE_ARM_ARCH_H + ++#include ++ + #define ARCH_HAVE_ENABLE_BREAKPOINT 1 + #define ARCH_HAVE_DISABLE_BREAKPOINT 1 + +@@ -43,7 +45,7 @@ struct arch_breakpoint_data { + + #define ARCH_HAVE_LTELF_DATA + struct arch_ltelf_data { +- /* We have this only for the hooks. */ ++ Elf_Data *jmprel_data; + }; + + #define ARCH_HAVE_LIBRARY_DATA +Index: b/sysdeps/linux-gnu/arm/plt.c +=================================================================== +--- a/sysdeps/linux-gnu/arm/plt.c ++++ b/sysdeps/linux-gnu/arm/plt.c +@@ -1,5 +1,6 @@ + /* + * This file is part of ltrace. ++ * Copyright (C) 2013 Petr Machata, Red Hat Inc. 
+ * Copyright (C) 2010 Zach Welch, CodeSourcery + * Copyright (C) 2004,2008,2009 Juan Cespedes + * +@@ -40,6 +41,19 @@ get_hardfp(uint64_t abi_vfp_args) + int + arch_elf_init(struct ltelf *lte, struct library *lib) + { ++ GElf_Addr jmprel_addr; ++ Elf_Scn *jmprel_sec; ++ GElf_Shdr jmprel_shdr; ++ if (elf_load_dynamic_entry(lte, DT_JMPREL, &jmprel_addr) < 0 ++ || elf_get_section_covering(lte, jmprel_addr, ++ &jmprel_sec, &jmprel_shdr) < 0 ++ || jmprel_sec == NULL) ++ return -1; ++ ++ lte->arch.jmprel_data = elf_loaddata(jmprel_sec, &jmprel_shdr); ++ if (lte->arch.jmprel_data == NULL) ++ return -1; ++ + /* Nothing in this section is strictly critical. It's not + * that much of a deal if we fail to guess right whether the + * ABI is softfp or hardfp. */ diff -Nru ltrace-0.7.3/debian/patches/arm_vararg_without_vfp_88a0fe50.patch ltrace-0.7.3/debian/patches/arm_vararg_without_vfp_88a0fe50.patch --- ltrace-0.7.3/debian/patches/arm_vararg_without_vfp_88a0fe50.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/arm_vararg_without_vfp_88a0fe50.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,81 @@ +From 88a0fe50a3fad351cf28ef3902dbd0dd3540735c Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Wed, 6 Feb 2013 19:44:56 +0100 +Subject: VFP shouldn't be used for parameter passing in vararg functions on + ARM + +--- + sysdeps/linux-gnu/arm/arch.h | 1 + + sysdeps/linux-gnu/arm/fetch.c | 22 +++++++++++++++++++--- + 2 files changed, 20 insertions(+), 3 deletions(-) + +Index: b/sysdeps/linux-gnu/arm/fetch.c +=================================================================== +--- a/sysdeps/linux-gnu/arm/fetch.c ++++ b/sysdeps/linux-gnu/arm/fetch.c +@@ -254,6 +254,7 @@ struct fetch_context { + arch_addr_t nsaa; + arch_addr_t ret_struct; + bool hardfp:1; ++ bool in_varargs:1; + }; + + static int +@@ -389,7 +390,7 @@ arch_fetch_arg_next(struct fetch_context + const size_t sz = type_sizeof(proc, info); + assert(sz != (size_t)-1); + +- if (ctx->hardfp) { ++ if (ctx->hardfp && !ctx->in_varargs) { + int rc; + if ((rc = consider_vfp(ctx, proc, info, valuep)) != 1) + return rc; +@@ -454,7 +455,7 @@ arch_fetch_retval(struct fetch_context * + size_t sz = type_sizeof(proc, info); + assert(sz != (size_t)-1); + +- if (ctx->hardfp) { ++ if (ctx->hardfp && !ctx->in_varargs) { + int rc; + if ((rc = consider_vfp(ctx, proc, info, valuep)) != 1) + return rc; +@@ -475,7 +476,7 @@ arch_fetch_retval(struct fetch_context * + + case ARGTYPE_FLOAT: + case ARGTYPE_DOUBLE: +- if (ctx->hardfp) { ++ if (ctx->hardfp && !ctx->in_varargs) { + unsigned char *data = value_reserve(valuep, sz); + if (data == NULL) + return -1; +@@ -515,3 +516,18 @@ arch_fetch_arg_done(struct fetch_context + { + free(context); + } ++ ++int ++arch_fetch_param_pack_start(struct fetch_context *context, ++ enum param_pack_flavor ppflavor) ++{ ++ if (ppflavor == PARAM_PACK_VARARGS) ++ context->in_varargs = true; ++ return 0; ++} ++ ++void ++arch_fetch_param_pack_end(struct fetch_context *context) ++{ ++ context->in_varargs = false; ++} +Index: b/sysdeps/linux-gnu/arm/arch.h +=================================================================== +--- a/sysdeps/linux-gnu/arm/arch.h ++++ b/sysdeps/linux-gnu/arm/arch.h +@@ -35,6 +35,7 @@ + #define LT_ELF_MACHINE EM_ARM + + #define ARCH_HAVE_FETCH_ARG ++#define ARCH_HAVE_FETCH_PACK + #define ARCH_HAVE_BREAKPOINT_DATA + struct arch_breakpoint_data { + int thumb_mode; diff -Nru ltrace-0.7.3/debian/patches/arm_vfp_params_1c8596d4.patch ltrace-0.7.3/debian/patches/arm_vfp_params_1c8596d4.patch --- 
ltrace-0.7.3/debian/patches/arm_vfp_params_1c8596d4.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/arm_vfp_params_1c8596d4.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,173 @@ +From 1c8596d41dde12f9af6a21035fac0c64f428ab76 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Wed, 6 Feb 2013 16:06:03 +0100 +Subject: Implement VFP parameter passing for ARM backend + +--- + sysdeps/linux-gnu/arm/fetch.c | 106 +++++++++++++++++++++++++++++++++++++----- + 1 file changed, 94 insertions(+), 12 deletions(-) + +Index: b/sysdeps/linux-gnu/arm/fetch.c +=================================================================== +--- a/sysdeps/linux-gnu/arm/fetch.c ++++ b/sysdeps/linux-gnu/arm/fetch.c +@@ -223,8 +223,15 @@ arch_library_clone(struct library *retp, + retp->arch = lib->arch; + } + ++enum { ++ /* How many (double) VFP registers the AAPCS uses for ++ * parameter passing. */ ++ NUM_VFP_REGS = 8, ++}; ++ + struct fetch_context { + struct pt_regs regs; ++ + struct { + union { + double d[32]; +@@ -232,6 +239,16 @@ struct fetch_context { + }; + uint32_t fpscr; + } fpregs; ++ ++ /* VFP register allocation. ALLOC.S tracks whether the ++ * corresponding FPREGS.S register is taken, ALLOC.D the same ++ * for FPREGS.D. We only track 8 (16) registers, because ++ * that's what the ABI uses for parameter passing. */ ++ union { ++ int16_t d[NUM_VFP_REGS]; ++ int8_t s[NUM_VFP_REGS * 2]; ++ } alloc; ++ + unsigned ncrn; + arch_addr_t sp; + arch_addr_t nsaa; +@@ -253,6 +270,8 @@ fetch_register_banks(struct Process *pro + context->ncrn = 0; + context->nsaa = context->sp = get_stack_pointer(proc); + ++ memset(&context->alloc, 0, sizeof(context->alloc)); ++ + return 0; + } + +@@ -301,6 +320,67 @@ arch_fetch_arg_clone(struct Process *pro + return clone; + } + ++/* 0 is success, 1 is failure, negative value is an error. */ ++static int ++pass_in_vfp(struct fetch_context *ctx, struct Process *proc, ++ enum arg_type type, size_t count, struct value *valuep) ++{ ++ assert(type == ARGTYPE_FLOAT || type == ARGTYPE_DOUBLE); ++ unsigned max = type == ARGTYPE_DOUBLE ? NUM_VFP_REGS : 2 * NUM_VFP_REGS; ++ if (count > max) ++ return 1; ++ ++ size_t i; ++ size_t j; ++ for (i = 0; i < max; ++i) { ++ for (j = i; j < i + count; ++j) ++ if ((type == ARGTYPE_DOUBLE && ctx->alloc.d[j] != 0) ++ || (type == ARGTYPE_FLOAT && ctx->alloc.s[j] != 0)) ++ goto next; ++ ++ /* Found COUNT consecutive unallocated registers at I. */ ++ const size_t sz = (type == ARGTYPE_FLOAT ? 4 : 8) * count; ++ unsigned char *data = value_reserve(valuep, sz); ++ if (data == NULL) ++ return -1; ++ ++ for (j = i; j < i + count; ++j) ++ if (type == ARGTYPE_DOUBLE) ++ ctx->alloc.d[j] = -1; ++ else ++ ctx->alloc.s[j] = -1; ++ ++ if (type == ARGTYPE_DOUBLE) ++ memcpy(data, ctx->fpregs.d + i, sz); ++ else ++ memcpy(data, ctx->fpregs.s + i, sz); ++ ++ return 0; ++ ++ next: ++ continue; ++ } ++ return 1; ++} ++ ++/* 0 is success, 1 is failure, negative value is an error. 
*/ ++static int ++consider_vfp(struct fetch_context *ctx, struct Process *proc, ++ struct arg_type_info *info, struct value *valuep) ++{ ++ struct arg_type_info *float_info = NULL; ++ size_t hfa_size = 1; ++ if (info->type == ARGTYPE_FLOAT || info->type == ARGTYPE_DOUBLE) ++ float_info = info; ++ else ++ float_info = type_get_hfa_type(info, &hfa_size); ++ ++ if (float_info != NULL && hfa_size <= 4) ++ return pass_in_vfp(ctx, proc, float_info->type, ++ hfa_size, valuep); ++ return 1; ++} ++ + int + arch_fetch_arg_next(struct fetch_context *ctx, enum tof type, + struct Process *proc, +@@ -309,6 +389,12 @@ arch_fetch_arg_next(struct fetch_context + const size_t sz = type_sizeof(proc, info); + assert(sz != (size_t)-1); + ++ if (ctx->hardfp) { ++ int rc; ++ if ((rc = consider_vfp(ctx, proc, info, valuep)) != 1) ++ return rc; ++ } ++ + /* IHI0042E_aapcs: If the argument requires double-word + * alignment (8-byte), the NCRN is rounded up to the next even + * register number. */ +@@ -368,15 +454,14 @@ arch_fetch_retval(struct fetch_context * + size_t sz = type_sizeof(proc, info); + assert(sz != (size_t)-1); + ++ if (ctx->hardfp) { ++ int rc; ++ if ((rc = consider_vfp(ctx, proc, info, valuep)) != 1) ++ return rc; ++ } ++ + switch (info->type) { + unsigned char *data; +- union { +- struct { +- uint32_t r0; +- uint32_t r1; +- } s; +- unsigned char buf[8]; +- } u; + + case ARGTYPE_VOID: + return 0; +@@ -409,12 +494,9 @@ arch_fetch_retval(struct fetch_context * + case ARGTYPE_ULONG: + case ARGTYPE_POINTER: + pass_in_registers: +- if (arm_get_register(proc, ARM_REG_R3, &u.s.r0) < 0 +- || (sz > 4 && arm_get_register(proc, ARM_REG_R1, +- &u.s.r1) < 0) +- || (data = value_reserve(valuep, sz)) == NULL) ++ if ((data = value_reserve(valuep, sz)) == NULL) + return -1; +- memmove(data, u.buf, sz); ++ memmove(data, ctx->regs.uregs, sz); + return 0; + } + assert(info->type != info->type); diff -Nru ltrace-0.7.3/debian/patches/dont_ltelf_destroy_if_init_fails_0ba3c5ee.patch ltrace-0.7.3/debian/patches/dont_ltelf_destroy_if_init_fails_0ba3c5ee.patch --- ltrace-0.7.3/debian/patches/dont_ltelf_destroy_if_init_fails_0ba3c5ee.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/dont_ltelf_destroy_if_init_fails_0ba3c5ee.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,30 @@ +From 0ba3c5eee259b77e3883e40c4d0cd2fab5b03ff3 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Mon, 11 Nov 2013 02:27:08 +0100 +Subject: In ltrace_init, don't call ltelf_destroy if ltelf_init fails + +--- + libltrace.c | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +Index: b/libltrace.c +=================================================================== +--- a/libltrace.c ++++ b/libltrace.c +@@ -136,9 +136,13 @@ ltrace_init(int argc, char **argv) { + if (command) { + /* Check that the binary ABI is supported before + * calling execute_program. 
*/ +- struct ltelf lte; +- ltelf_init(<e, command); +- ltelf_destroy(<e); ++ { ++ struct ltelf lte; ++ if (ltelf_init(<e, command) == 0) ++ ltelf_destroy(<e); ++ else ++ exit(EXIT_FAILURE); ++ } + + pid_t pid = execute_program(command, argv); + struct Process *proc = open_program(command, pid); diff -Nru ltrace-0.7.3/debian/patches/elf_load_dynamic_entry_4f2f66e6.patch ltrace-0.7.3/debian/patches/elf_load_dynamic_entry_4f2f66e6.patch --- ltrace-0.7.3/debian/patches/elf_load_dynamic_entry_4f2f66e6.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/elf_load_dynamic_entry_4f2f66e6.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,153 @@ +From 4f2f66e6abc7fedf3a5d04fab7cc00e5f82b37cf Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Mon, 4 Nov 2013 22:45:34 -0500 +Subject: Move load_dynamic_entry from PPC backend to ltrace-elf.c/.h + +--- + ltrace-elf.c | 32 +++++++++++++++++++++++++++++++ + ltrace-elf.h | 4 +++ + sysdeps/linux-gnu/ppc/plt.c | 45 ++++++-------------------------------------- + 3 files changed, 43 insertions(+), 38 deletions(-) + +Index: b/ltrace-elf.c +=================================================================== +--- a/ltrace-elf.c ++++ b/ltrace-elf.c +@@ -527,6 +527,38 @@ read_relplt(struct ltelf *lte, Elf_Scn * + return 0; + } + ++int ++elf_load_dynamic_entry(struct ltelf *lte, int tag, GElf_Addr *valuep) ++{ ++ Elf_Scn *scn; ++ GElf_Shdr shdr; ++ if (elf_get_section_type(lte, SHT_DYNAMIC, &scn, &shdr) < 0 ++ || scn == NULL) { ++ fail: ++ fprintf(stderr, "Couldn't get SHT_DYNAMIC: %s\n", ++ elf_errmsg(-1)); ++ return -1; ++ } ++ ++ Elf_Data *data = elf_loaddata(scn, &shdr); ++ if (data == NULL) ++ goto fail; ++ ++ size_t j; ++ for (j = 0; j < shdr.sh_size / shdr.sh_entsize; ++j) { ++ GElf_Dyn dyn; ++ if (gelf_getdyn(data, j, &dyn) == NULL) ++ goto fail; ++ ++ if(dyn.d_tag == tag) { ++ *valuep = dyn.d_un.d_ptr; ++ return 0; ++ } ++ } ++ ++ return -1; ++} ++ + static int + ltelf_read_elf(struct ltelf *lte, const char *filename) + { +Index: b/ltrace-elf.h +=================================================================== +--- a/ltrace-elf.h ++++ b/ltrace-elf.h +@@ -113,6 +113,10 @@ struct elf_each_symbol_t { + void *data), + void *data); + ++/* Read a given DT_ TAG from LTE. Value is returned in *VALUEP. ++ * Returns 0 on success or a negative value on failure. */ ++int elf_load_dynamic_entry(struct ltelf *lte, int tag, GElf_Addr *valuep); ++ + /* Read, respectively, 1, 2, 4, or 8 bytes from Elf data at given + * OFFSET, and store it in *RETP. Returns 0 on success or a negative + * value if there's not enough data. 
*/ +Index: b/sysdeps/linux-gnu/ppc/plt.c +=================================================================== +--- a/sysdeps/linux-gnu/ppc/plt.c ++++ b/sysdeps/linux-gnu/ppc/plt.c +@@ -441,38 +441,6 @@ get_glink_vma(struct ltelf *lte, GElf_Ad + } + + static int +-load_dynamic_entry(struct ltelf *lte, int tag, GElf_Addr *valuep) +-{ +- Elf_Scn *scn; +- GElf_Shdr shdr; +- if (elf_get_section_type(lte, SHT_DYNAMIC, &scn, &shdr) < 0 +- || scn == NULL) { +- fail: +- fprintf(stderr, "Couldn't get SHT_DYNAMIC: %s\n", +- elf_errmsg(-1)); +- return -1; +- } +- +- Elf_Data *data = elf_loaddata(scn, &shdr); +- if (data == NULL) +- goto fail; +- +- size_t j; +- for (j = 0; j < shdr.sh_size / shdr.sh_entsize; ++j) { +- GElf_Dyn dyn; +- if (gelf_getdyn(data, j, &dyn) == NULL) +- goto fail; +- +- if(dyn.d_tag == tag) { +- *valuep = dyn.d_un.d_ptr; +- return 0; +- } +- } +- +- return -1; +-} +- +-static int + nonzero_data(Elf_Data *data) + { + /* We are not supposed to get here if there's no PLT. */ +@@ -520,7 +488,7 @@ arch_elf_init(struct ltelf *lte, struct + + if (lte->ehdr.e_machine == EM_PPC && lte->arch.secure_plt) { + GElf_Addr ppcgot; +- if (load_dynamic_entry(lte, DT_PPC_GOT, &ppcgot) < 0) { ++ if (elf_load_dynamic_entry(lte, DT_PPC_GOT, &ppcgot) < 0) { + fprintf(stderr, "couldn't find DT_PPC_GOT\n"); + return -1; + } +@@ -533,7 +501,8 @@ arch_elf_init(struct ltelf *lte, struct + + } else if (lte->ehdr.e_machine == EM_PPC64) { + GElf_Addr glink_vma; +- if (load_dynamic_entry(lte, DT_PPC64_GLINK, &glink_vma) < 0) { ++ if (elf_load_dynamic_entry(lte, DT_PPC64_GLINK, ++ &glink_vma) < 0) { + fprintf(stderr, "couldn't find DT_PPC64_GLINK\n"); + return -1; + } +@@ -543,8 +512,8 @@ arch_elf_init(struct ltelf *lte, struct + + } else { + /* By exhaustion--PPC32 BSS. 
*/ +- if (load_dynamic_entry(lte, DT_PLTGOT, +- &lib->arch.pltgot_addr) < 0) { ++ if (elf_load_dynamic_entry(lte, DT_PLTGOT, ++ &lib->arch.pltgot_addr) < 0) { + fprintf(stderr, "couldn't find DT_PLTGOT\n"); + return -1; + } +@@ -639,8 +608,8 @@ arch_elf_init(struct ltelf *lte, struct + Elf_Scn *rela_sec; + GElf_Shdr rela_shdr; + if (lte->ehdr.e_machine == EM_PPC64 +- && load_dynamic_entry(lte, DT_RELA, &rela) == 0 +- && load_dynamic_entry(lte, DT_RELASZ, &relasz) == 0 ++ && elf_load_dynamic_entry(lte, DT_RELA, &rela) == 0 ++ && elf_load_dynamic_entry(lte, DT_RELASZ, &relasz) == 0 + && elf_get_section_covering(lte, rela, &rela_sec, &rela_shdr) == 0 + && rela_sec != NULL) { + diff -Nru ltrace-0.7.3/debian/patches/elf_read_uleb128_184779e4.patch ltrace-0.7.3/debian/patches/elf_read_uleb128_184779e4.patch --- ltrace-0.7.3/debian/patches/elf_read_uleb128_184779e4.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/elf_read_uleb128_184779e4.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,75 @@ +From 184779e4e8a42f2e9e7f3cee4bf4eb31e8c84ee4 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Tue, 5 Feb 2013 01:52:05 +0100 +Subject: Add elf_read_{,next_}uleb128 + +--- + ltrace-elf.c | 32 ++++++++++++++++++++++++++++++++ + ltrace-elf.h | 5 +++++ + 2 files changed, 37 insertions(+) + +Index: b/ltrace-elf.c +=================================================================== +--- a/ltrace-elf.c ++++ b/ltrace-elf.c +@@ -321,6 +321,38 @@ DEF_READER(elf_read_next_u64, 64) + #undef DEF_READER + + int ++elf_read_next_uleb128(Elf_Data *data, GElf_Xword *offset, uint64_t *retp) ++{ ++ uint64_t result = 0; ++ int shift = 0; ++ int size = 8 * sizeof result; ++ ++ while (1) { ++ uint8_t byte; ++ if (elf_read_next_u8(data, offset, &byte) < 0) ++ return -1; ++ ++ uint8_t payload = byte & 0x7f; ++ result |= (uint64_t)payload << shift; ++ shift += 7; ++ if (shift > size && byte != 0x1) ++ return -1; ++ if ((byte & 0x80) == 0) ++ break; ++ } ++ ++ if (retp != NULL) ++ *retp = result; ++ return 0; ++} ++ ++int ++elf_read_uleb128(Elf_Data *data, GElf_Xword offset, uint64_t *retp) ++{ ++ return elf_read_next_uleb128(data, &offset, retp); ++} ++ ++int + ltelf_init(struct ltelf *lte, const char *filename) + { + memset(lte, 0, sizeof *lte); +Index: b/ltrace-elf.h +=================================================================== +--- a/ltrace-elf.h ++++ b/ltrace-elf.h +@@ -121,12 +121,17 @@ int elf_read_u16(Elf_Data *data, GElf_Xw + int elf_read_u32(Elf_Data *data, GElf_Xword offset, uint32_t *retp); + int elf_read_u64(Elf_Data *data, GElf_Xword offset, uint64_t *retp); + ++/* Read at most 64-bit quantity recorded in an ULEB128 variable-length ++ * encoding. */ ++int elf_read_uleb128(Elf_Data *data, GElf_Xword offset, uint64_t *retp); ++ + /* These are same as above, but update *OFFSET with the width + * of read datum. */ + int elf_read_next_u8(Elf_Data *data, GElf_Xword *offset, uint8_t *retp); + int elf_read_next_u16(Elf_Data *data, GElf_Xword *offset, uint16_t *retp); + int elf_read_next_u32(Elf_Data *data, GElf_Xword *offset, uint32_t *retp); + int elf_read_next_u64(Elf_Data *data, GElf_Xword *offset, uint64_t *retp); ++int elf_read_next_uleb128(Elf_Data *data, GElf_Xword *offset, uint64_t *retp); + + /* Return whether there's AMOUNT more bytes after OFFSET in DATA. 
*/ + int elf_can_read_next(Elf_Data *data, GElf_Xword offset, GElf_Xword amount); diff -Nru ltrace-0.7.3/debian/patches/find_irelative_b061bae3.patch ltrace-0.7.3/debian/patches/find_irelative_b061bae3.patch --- ltrace-0.7.3/debian/patches/find_irelative_b061bae3.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/find_irelative_b061bae3.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,71 @@ +From b061bae322edd4894f14ea2aea6baec36d32eda8 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Fri, 25 Oct 2013 23:50:18 +0200 +Subject: Split linux_elf_find_irelative_name out of + linux_elf_add_plt_entry_irelative + +--- + sysdeps/linux-gnu/trace.c | 23 ++++++++++++++--------- + sysdeps/linux-gnu/trace.h | 6 ++++++ + 2 files changed, 20 insertions(+), 9 deletions(-) + +Index: b/sysdeps/linux-gnu/trace.c +=================================================================== +--- a/sysdeps/linux-gnu/trace.c ++++ b/sysdeps/linux-gnu/trace.c +@@ -1243,17 +1243,14 @@ irelative_name_cb(GElf_Sym *symbol, cons + return CBS_CONT; + } + +-enum plt_status +-linux_elf_add_plt_entry_irelative(struct Process *proc, struct ltelf *lte, +- GElf_Rela *rela, size_t ndx, +- struct library_symbol **ret) +- ++char * ++linux_elf_find_irelative_name(struct ltelf *lte, GElf_Rela *rela) + { + struct irelative_name_data_t data = { rela->r_addend, NULL }; + if (rela->r_addend != 0 + && elf_each_symbol(lte, 0, + irelative_name_cb, &data).status < 0) +- return -1; ++ return NULL; + + const char *name; + if (data.found_name != NULL) { +@@ -1268,8 +1265,16 @@ linux_elf_add_plt_entry_irelative(struct + #undef NAME + } + +- if (default_elf_add_plt_entry(proc, lte, name, rela, ndx, ret) < 0) +- return PLT_FAIL; ++ return strdup(name); ++} + +- return PLT_OK; ++enum plt_status ++linux_elf_add_plt_entry_irelative(struct Process *proc, struct ltelf *lte, ++ GElf_Rela *rela, size_t ndx, ++ struct library_symbol **ret) ++{ ++ char *name = linux_elf_find_irelative_name(lte, rela); ++ int i = default_elf_add_plt_entry(proc, lte, name, rela, ndx, ret); ++ free(name); ++ return i < 0 ? plt_fail : plt_ok; + } +Index: b/sysdeps/linux-gnu/trace.h +=================================================================== +--- a/sysdeps/linux-gnu/trace.h ++++ b/sysdeps/linux-gnu/trace.h +@@ -136,4 +136,10 @@ enum plt_status linux_elf_add_plt_entry_ + GElf_Rela *rela, size_t ndx, + struct library_symbol **ret); + ++/* Service routine of the above. Determines a name corresponding to ++ * RELA, or invents a new one. Returns NULL on failures, otherwise it ++ * returns a malloc'd pointer that the caller is responsible for ++ * freeing. */ ++char *linux_elf_find_irelative_name(struct ltelf *lte, GElf_Rela *rela); ++ + #endif /* _LTRACE_LINUX_TRACE_H_ */ diff -Nru ltrace-0.7.3/debian/patches/Implement-aarch64-support.patch ltrace-0.7.3/debian/patches/Implement-aarch64-support.patch --- ltrace-0.7.3/debian/patches/Implement-aarch64-support.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/Implement-aarch64-support.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,1945 @@ +Author: Petr Machata +Description: Set child stack alignment in trace-clone.c + This is important on aarch64, which requires 16-byte aligned + stack pointer. This might be relevant on other arches as well, + I suspect we just happened to get the 16-byte boundary in some + cases. 
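
The aarch64 requirement the description above mentions comes down to handing clone() a child stack whose top sits on a 16-byte boundary. A minimal stand-alone sketch of that idea follows; it is not the trace-clone.c change itself, and the buffer size, helper name, and use of malloc are arbitrary choices for the example:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>

static int
child_fn(void *arg)
{
	(void)arg;
	return 0;
}

int
main(void)
{
	enum { STACK_SIZE = 64 * 1024 };
	char *stack = malloc(STACK_SIZE);
	if (stack == NULL)
		return 1;

	/* The stack grows down, so pass the top of the buffer, rounded
	 * down to a 16-byte boundary as the AArch64 ABI expects of the
	 * stack pointer. */
	uintptr_t top = ((uintptr_t)stack + STACK_SIZE) & ~(uintptr_t)15;

	pid_t pid = clone(child_fn, (void *)top, SIGCHLD, NULL);
	if (pid == -1) {
		perror("clone");
		free(stack);
		return 1;
	}
	waitpid(pid, NULL, 0);
	free(stack);
	return 0;
}

An unaligned stack pointer in the child is what made the clone test case unreliable on aarch64; on other architectures the same buffer often happened to end on a suitable boundary, which is why the problem only surfaced there.
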
+Applied-Upstream: http://anonscm.debian.org/gitweb/?p=collab-maint/ltrace.git;a=commit;h=912a0f75b3521803fa724a55f0e883c134c7b4e9 +Last-Update: 2014-03-13 + +Index: ltrace/configure.ac +=================================================================== +--- ltrace.orig/configure.ac 2014-03-12 16:20:44.020676662 -0600 ++++ ltrace/configure.ac 2014-03-12 16:20:44.012676553 -0600 +@@ -1,6 +1,6 @@ + # -*- Autoconf -*- + # This file is part of ltrace. +-# Copyright (C) 2010,2013 Petr Machata, Red Hat Inc. ++# Copyright (C) 2010,2013,2014 Petr Machata, Red Hat Inc. + # Copyright (C) 2010,2011 Joe Damato + # Copyright (C) 2010 Marc Kleine-Budde + # Copyright (C) 2010 Zachary T Welch +@@ -319,6 +319,7 @@ + Makefile + sysdeps/Makefile + sysdeps/linux-gnu/Makefile ++ sysdeps/linux-gnu/aarch64/Makefile + sysdeps/linux-gnu/alpha/Makefile + sysdeps/linux-gnu/arm/Makefile + sysdeps/linux-gnu/cris/Makefile +Index: ltrace/sysdeps/linux-gnu/Makefile.am +=================================================================== +--- ltrace.orig/sysdeps/linux-gnu/Makefile.am 2014-03-12 16:20:44.020676662 -0600 ++++ ltrace/sysdeps/linux-gnu/Makefile.am 2014-03-12 16:20:44.016676607 -0600 +@@ -1,4 +1,5 @@ + # This file is part of ltrace. ++# Copyright (C) 2014 Petr Machata, Red Hat, Inc. + # Copyright (C) 2010,2012 Marc Kleine-Budde, Pengutronix + # + # This program is free software; you can redistribute it and/or +@@ -16,7 +17,7 @@ + # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + # 02110-1301 USA + +-DIST_SUBDIRS = alpha arm cris ia64 m68k mips ppc s390 sparc x86 ++DIST_SUBDIRS = aarch64 alpha arm cris ia64 m68k mips ppc s390 sparc x86 + + SUBDIRS = \ + $(HOST_CPU) +Index: ltrace/sysdeps/linux-gnu/aarch64/Makefile.am +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ ltrace/sysdeps/linux-gnu/aarch64/Makefile.am 2014-03-12 16:20:44.016676607 -0600 +@@ -0,0 +1,25 @@ ++# This file is part of ltrace. ++# Copyright (C) 2014 Petr Machata, Red Hat, Inc. ++# ++# This program is free software; you can redistribute it and/or ++# modify it under the terms of the GNU General Public License as ++# published by the Free Software Foundation; either version 2 of the ++# License, or (at your option) any later version. ++# ++# This program is distributed in the hope that it will be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program; if not, write to the Free Software ++# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++# 02110-1301 USA ++ ++noinst_LTLIBRARIES = ../libcpu.la ++ ++___libcpu_la_SOURCES = fetch.c plt.c regs.c trace.c ++ ++noinst_HEADERS = arch.h ptrace.h signalent.h syscallent.h ++ ++MAINTAINERCLEANFILES = Makefile.in +Index: ltrace/sysdeps/linux-gnu/aarch64/arch.h +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ ltrace/sysdeps/linux-gnu/aarch64/arch.h 2014-03-12 16:20:44.016676607 -0600 +@@ -0,0 +1,37 @@ ++/* ++ * This file is part of ltrace. ++ * Copyright (C) 2014 Petr Machata, Red Hat, Inc. 
++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++#ifndef LTRACE_AARCH64_ARCH_H ++#define LTRACE_AARCH64_ARCH_H ++ ++/* | 31 21 | 20 5 | 4 0 | * ++ * | 1 1 0 1 0 1 0 0 0 0 1 | imm16 | 0 0 0 0 0 | */ ++#define BREAKPOINT_VALUE { 0xd4, 0x20, 0, 0 } ++#define BREAKPOINT_LENGTH 4 ++#define DECR_PC_AFTER_BREAK 0 ++ ++#define LT_ELFCLASS ELFCLASS64 ++#define LT_ELF_MACHINE EM_AARCH64 ++ ++#define ARCH_HAVE_FETCH_ARG ++#define ARCH_ENDIAN_BIG ++#define ARCH_HAVE_SIZEOF ++#define ARCH_HAVE_ALIGNOF ++ ++#endif /* LTRACE_AARCH64_ARCH_H */ +Index: ltrace/sysdeps/linux-gnu/aarch64/fetch.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ ltrace/sysdeps/linux-gnu/aarch64/fetch.c 2014-03-12 16:23:25.382866486 -0600 +@@ -0,0 +1,365 @@ ++/* ++ * This file is part of ltrace. ++ * Copyright (C) 2014 Petr Machata, Red Hat, Inc. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include "fetch.h" ++#include "proc.h" ++#include "type.h" ++#include "value.h" ++ ++int aarch64_read_gregs(struct Process *proc, struct user_pt_regs *regs); ++int aarch64_read_fregs(struct Process *proc, struct user_fpsimd_state *regs); ++ ++ ++struct fetch_context ++{ ++ struct user_pt_regs gregs; ++ struct user_fpsimd_state fpregs; ++ arch_addr_t nsaa; ++ unsigned ngrn; ++ unsigned nsrn; ++ arch_addr_t x8; ++}; ++ ++static int ++context_init(struct fetch_context *context, struct Process *proc) ++{ ++ if (aarch64_read_gregs(proc, &context->gregs) < 0 ++ || aarch64_read_fregs(proc, &context->fpregs) < 0) ++ return -1; ++ ++ context->ngrn = 0; ++ context->nsrn = 0; ++ /* XXX double cast */ ++ context->nsaa = (arch_addr_t) (uintptr_t) context->gregs.sp; ++ context->x8 = 0; ++ ++ return 0; ++} ++ ++struct fetch_context * ++arch_fetch_arg_clone(struct Process *proc, struct fetch_context *context) ++{ ++ struct fetch_context *ret = malloc(sizeof(*ret)); ++ if (ret == NULL) ++ return NULL; ++ return memcpy(ret, context, sizeof(*ret)); ++} ++ ++static void ++fetch_next_gpr(struct fetch_context *context, unsigned char *buf) ++{ ++ uint64_t u = context->gregs.regs[context->ngrn++]; ++ memcpy(buf, &u, 8); ++} ++ ++static int ++fetch_gpr(struct fetch_context *context, struct value *value, size_t sz) ++{ ++ if (sz < 8) ++ sz = 8; ++ ++ unsigned char *buf = value_reserve(value, sz); ++ if (buf == NULL) ++ return -1; ++ ++ size_t i; ++ for (i = 0; i < sz; i += 8) ++ fetch_next_gpr(context, buf + i); ++ ++ return 0; ++} ++ ++static void ++fetch_next_sse(struct fetch_context *context, unsigned char *buf, size_t sz) ++{ ++ __int128 u = context->fpregs.vregs[context->nsrn++]; ++ memcpy(buf, &u, sz); ++} ++ ++static int ++fetch_sse(struct fetch_context *context, struct value *value, size_t sz) ++{ ++ unsigned char *buf = value_reserve(value, sz); ++ if (buf == NULL) ++ return -1; ++ ++ fetch_next_sse(context, buf, sz); ++ return 0; ++} ++ ++static int ++fetch_hfa(struct fetch_context *context, ++ struct value *value, struct arg_type_info *hfa_t, size_t count) ++{ ++ size_t sz = type_sizeof(value->inferior, hfa_t); ++ unsigned char *buf = value_reserve(value, sz * count); ++ if (buf == NULL) ++ return -1; ++ ++ size_t i; ++ for (i = 0; i < count; ++i) { ++ fetch_next_sse(context, buf, sz); ++ buf += sz; ++ } ++ return 0; ++} ++ ++static int ++fetch_stack(struct fetch_context *context, struct value *value, ++ size_t align, size_t sz) ++{ ++ if (align < 8) ++ align = 8; ++ size_t amount = ((sz + align - 1) / align) * align; ++ ++ /* XXX double casts */ ++ uintptr_t sp = (uintptr_t) context->nsaa; ++ sp = ((sp + align - 1) / align) * align; ++ ++ value_in_inferior(value, (arch_addr_t) sp); ++ ++ sp += amount; ++ context->nsaa = (arch_addr_t) sp; ++ ++ return 0; ++} ++ ++enum convert_method { ++ CVT_ERR = -1, ++ CVT_NOP = 0, ++ CVT_BYREF, ++}; ++ ++enum fetch_method { ++ FETCH_NOP, ++ FETCH_STACK, ++ FETCH_GPR, ++ FETCH_SSE, ++ FETCH_HFA, ++}; ++ ++struct fetch_script { ++ enum convert_method c; ++ enum fetch_method f; ++ size_t sz; ++ struct arg_type_info *hfa_t; ++ size_t count; ++}; ++ ++static struct fetch_script ++pass_arg(struct fetch_context const *context, ++ struct Process *proc, struct arg_type_info *info) ++{ ++ enum 
fetch_method cvt = CVT_NOP; ++ ++ size_t sz = type_sizeof(proc, info); ++ if (sz == (size_t) -1) ++ return (struct fetch_script) { CVT_ERR, FETCH_NOP, sz }; ++ ++ switch (info->type) { ++ case ARGTYPE_VOID: ++ return (struct fetch_script) { cvt, FETCH_NOP, sz }; ++ ++ case ARGTYPE_STRUCT: ++ case ARGTYPE_ARRAY:; ++ size_t count; ++ struct arg_type_info *hfa_t = type_get_hfa_type(info, &count); ++ if (hfa_t != NULL && count <= 4) { ++ if (context->nsrn + count <= 8) ++ return (struct fetch_script) ++ { cvt, FETCH_HFA, sz, hfa_t, count }; ++ return (struct fetch_script) ++ { cvt, FETCH_STACK, sz, hfa_t, count }; ++ } ++ ++ if (sz <= 16) { ++ size_t count = sz / 8; ++ if (context->ngrn + count <= 8) ++ return (struct fetch_script) ++ { cvt, FETCH_GPR, sz }; ++ } ++ ++ cvt = CVT_BYREF; ++ sz = 8; ++ /* Fall through. */ ++ ++ case ARGTYPE_POINTER: ++ case ARGTYPE_INT: ++ case ARGTYPE_UINT: ++ case ARGTYPE_LONG: ++ case ARGTYPE_ULONG: ++ case ARGTYPE_CHAR: ++ case ARGTYPE_SHORT: ++ case ARGTYPE_USHORT: ++ if (context->ngrn < 8 && sz <= 8) ++ return (struct fetch_script) { cvt, FETCH_GPR, sz }; ++ /* We don't support types wider than 8 bytes as of ++ * now. */ ++ assert(sz <= 8); ++ ++ return (struct fetch_script) { cvt, FETCH_STACK, sz }; ++ ++ case ARGTYPE_FLOAT: ++ case ARGTYPE_DOUBLE: ++ if (context->nsrn < 8) { ++ /* ltrace doesn't support float128. */ ++ assert(sz <= 8); ++ return (struct fetch_script) { cvt, FETCH_SSE, sz }; ++ } ++ ++ return (struct fetch_script) { cvt, FETCH_STACK, sz }; ++ } ++ ++ assert(! "Failed to allocate argument."); ++ abort(); ++} ++ ++static int ++convert_arg(struct value *value, struct fetch_script how) ++{ ++ switch (how.c) { ++ case CVT_NOP: ++ return 0; ++ case CVT_BYREF: ++ return value_pass_by_reference(value); ++ case CVT_ERR: ++ return -1; ++ } ++ ++ assert(! "Don't know how to convert argument."); ++ abort(); ++} ++ ++static int ++fetch_arg(struct fetch_context *context, ++ struct Process *proc, struct arg_type_info *info, ++ struct value *value, struct fetch_script how) ++{ ++ if (convert_arg(value, how) < 0) ++ return -1; ++ ++ switch (how.f) { ++ case FETCH_NOP: ++ return 0; ++ ++ case FETCH_STACK: ++ if (how.hfa_t != NULL && how.count != 0 && how.count <= 8) ++ context->nsrn = 8; ++ return fetch_stack(context, value, ++ type_alignof(proc, info), how.sz); ++ ++ case FETCH_GPR: ++ return fetch_gpr(context, value, how.sz); ++ ++ case FETCH_SSE: ++ return fetch_sse(context, value, how.sz); ++ ++ case FETCH_HFA: ++ return fetch_hfa(context, value, how.hfa_t, how.count); ++ } ++ ++ assert(! "Don't know how to fetch argument."); ++ abort(); ++} ++ ++struct fetch_context * ++arch_fetch_arg_init(enum tof type, struct Process *proc, ++ struct arg_type_info *ret_info) ++{ ++ struct fetch_context *context = malloc(sizeof *context); ++ if (context == NULL || context_init(context, proc) < 0) { ++ fail: ++ free(context); ++ return NULL; ++ } ++ ++ /* There's a provision in ARMv8 parameter passing convention ++ * for returning types that, if passed as first argument to a ++ * function, would be passed on stack. For those types, x8 ++ * contains an address where the return argument should be ++ * placed. The callee doesn't need to preserve the value of ++ * x8, so we need to fetch it now. ++ * ++ * To my knowledge, there are currently no types where this ++ * holds, but the code is here, utterly untested. 
*/ ++ ++ struct fetch_script how = pass_arg(context, proc, ret_info); ++ if (how.c == CVT_ERR) ++ goto fail; ++ if (how.c == CVT_NOP && how.f == FETCH_STACK) { ++ /* XXX double cast. */ ++ context->x8 = (arch_addr_t) (uintptr_t) context->gregs.regs[8]; ++ /* See the comment above about the assert. */ ++ assert(! "Unexpected: first argument passed on stack."); ++ abort(); ++ } ++ ++ return context; ++} ++ ++int ++arch_fetch_arg_next(struct fetch_context *context, enum tof type, ++ struct Process *proc, struct arg_type_info *info, ++ struct value *value) ++{ ++ return fetch_arg(context, proc, info, value, ++ pass_arg(context, proc, info)); ++} ++ ++int ++arch_fetch_retval(struct fetch_context *context, enum tof type, ++ struct Process *proc, struct arg_type_info *info, ++ struct value *value) ++{ ++ if (context->x8 != 0) { ++ value_in_inferior(value, context->x8); ++ return 0; ++ } ++ ++ if (context_init(context, proc) < 0) ++ return -1; ++ ++ return fetch_arg(context, proc, info, value, ++ pass_arg(context, proc, info)); ++} ++ ++void ++arch_fetch_arg_done(struct fetch_context *context) ++{ ++ if (context != NULL) ++ free(context); ++} ++ ++size_t ++arch_type_sizeof(struct Process *proc, struct arg_type_info *arg) ++{ ++ return (size_t) -2; ++} ++ ++size_t ++arch_type_alignof(struct Process *proc, struct arg_type_info *arg) ++{ ++ return (size_t) -2; ++} +Index: ltrace/sysdeps/linux-gnu/aarch64/plt.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ ltrace/sysdeps/linux-gnu/aarch64/plt.c 2014-03-12 16:23:25.382866486 -0600 +@@ -0,0 +1,38 @@ ++/* ++ * This file is part of ltrace. ++ * Copyright (C) 2014 Petr Machata, Red Hat, Inc. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++ ++#include ++ ++#include "backend.h" ++#include "proc.h" ++#include "library.h" ++#include "ltrace-elf.h" ++ ++arch_addr_t ++sym2addr(struct Process *proc, struct library_symbol *sym) ++{ ++ return sym->enter_addr; ++} ++ ++GElf_Addr ++arch_plt_sym_val(struct ltelf *lte, size_t ndx, GElf_Rela *rela) ++{ ++ return lte->plt_addr + 32 + ndx * 16; ++} +Index: ltrace/sysdeps/linux-gnu/aarch64/ptrace.h +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ ltrace/sysdeps/linux-gnu/aarch64/ptrace.h 2014-03-12 16:20:44.016676607 -0600 +@@ -0,0 +1,22 @@ ++/* ++ * This file is part of ltrace. ++ * Copyright (C) 2014 Petr Machata, Red Hat, Inc. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. 
++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++ ++#include ++#include +Index: ltrace/sysdeps/linux-gnu/aarch64/regs.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ ltrace/sysdeps/linux-gnu/aarch64/regs.c 2014-03-12 16:23:25.382866486 -0600 +@@ -0,0 +1,130 @@ ++/* ++ * This file is part of ltrace. ++ * Copyright (C) 2014 Petr Machata, Red Hat, Inc. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "backend.h" ++#include "proc.h" ++ ++#define PC_OFF (32 * 4) ++ ++int ++aarch64_read_gregs(struct Process *proc, struct user_pt_regs *regs) ++{ ++ *regs = (struct user_pt_regs) {}; ++ struct iovec iovec; ++ iovec.iov_base = regs; ++ iovec.iov_len = sizeof *regs; ++ return ptrace(PTRACE_GETREGSET, proc->pid, NT_PRSTATUS, &iovec) < 0 ++ ? -1 : 0; ++} ++ ++int ++aarch64_write_gregs(struct Process *proc, struct user_pt_regs *regs) ++{ ++ struct iovec iovec; ++ iovec.iov_base = regs; ++ iovec.iov_len = sizeof *regs; ++ return ptrace(PTRACE_SETREGSET, proc->pid, NT_PRSTATUS, &iovec) < 0 ++ ? -1 : 0; ++} ++ ++int ++aarch64_read_fregs(struct Process *proc, struct user_fpsimd_state *regs) ++{ ++ *regs = (struct user_fpsimd_state) {}; ++ struct iovec iovec; ++ iovec.iov_base = regs; ++ iovec.iov_len = sizeof *regs; ++ return ptrace(PTRACE_GETREGSET, proc->pid, NT_FPREGSET, &iovec) < 0 ++ ? 
-1 : 0; ++} ++ ++arch_addr_t ++get_instruction_pointer(struct Process *proc) ++{ ++ struct user_pt_regs regs; ++ if (aarch64_read_gregs(proc, ®s) < 0) { ++ fprintf(stderr, "get_instruction_pointer: " ++ "Couldn't read registers of %d.\n", proc->pid); ++ return 0; ++ } ++ ++ /* ++ char buf[128]; ++ sprintf(buf, "cat /proc/%d/maps", proc->pid); ++ system(buf); ++ */ ++ ++ /* XXX double cast */ ++ return (arch_addr_t) (uintptr_t) regs.pc; ++} ++ ++void ++set_instruction_pointer(struct Process *proc, arch_addr_t addr) ++{ ++ struct user_pt_regs regs; ++ if (aarch64_read_gregs(proc, ®s) < 0) { ++ fprintf(stderr, "get_instruction_pointer: " ++ "Couldn't read registers of %d.\n", proc->pid); ++ return; ++ } ++ ++ /* XXX double cast */ ++ regs.pc = (uint64_t) (uintptr_t) addr; ++ ++ if (aarch64_write_gregs(proc, ®s) < 0) { ++ fprintf(stderr, "get_instruction_pointer: " ++ "Couldn't write registers of %d.\n", proc->pid); ++ return; ++ } ++} ++ ++arch_addr_t ++get_stack_pointer(struct Process *proc) ++{ ++ struct user_pt_regs regs; ++ if (aarch64_read_gregs(proc, ®s) < 0) { ++ fprintf(stderr, "get_stack_pointer: " ++ "Couldn't read registers of %d.\n", proc->pid); ++ return 0; ++ } ++ ++ /* XXX double cast */ ++ return (arch_addr_t) (uintptr_t) regs.sp; ++} ++ ++arch_addr_t ++get_return_addr(struct Process *proc, arch_addr_t stack_pointer) ++{ ++ struct user_pt_regs regs; ++ if (aarch64_read_gregs(proc, ®s) < 0) { ++ fprintf(stderr, "get_return_addr: " ++ "Couldn't read registers of %d.\n", proc->pid); ++ return 0; ++ } ++ ++ /* XXX double cast */ ++ return (arch_addr_t) (uintptr_t) regs.regs[30]; ++} +Index: ltrace/sysdeps/linux-gnu/aarch64/signalent.h +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ ltrace/sysdeps/linux-gnu/aarch64/signalent.h 2014-03-12 16:20:44.016676607 -0600 +@@ -0,0 +1,52 @@ ++/* ++ * This file is part of ltrace. ++ * Copyright (C) 2006 Ian Wienand ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++ ++ "SIG_0", /* 0 */ ++ "SIGHUP", /* 1 */ ++ "SIGINT", /* 2 */ ++ "SIGQUIT", /* 3 */ ++ "SIGILL", /* 4 */ ++ "SIGTRAP", /* 5 */ ++ "SIGABRT", /* 6 */ ++ "SIGBUS", /* 7 */ ++ "SIGFPE", /* 8 */ ++ "SIGKILL", /* 9 */ ++ "SIGUSR1", /* 10 */ ++ "SIGSEGV", /* 11 */ ++ "SIGUSR2", /* 12 */ ++ "SIGPIPE", /* 13 */ ++ "SIGALRM", /* 14 */ ++ "SIGTERM", /* 15 */ ++ "SIGSTKFLT", /* 16 */ ++ "SIGCHLD", /* 17 */ ++ "SIGCONT", /* 18 */ ++ "SIGSTOP", /* 19 */ ++ "SIGTSTP", /* 20 */ ++ "SIGTTIN", /* 21 */ ++ "SIGTTOU", /* 22 */ ++ "SIGURG", /* 23 */ ++ "SIGXCPU", /* 24 */ ++ "SIGXFSZ", /* 25 */ ++ "SIGVTALRM", /* 26 */ ++ "SIGPROF", /* 27 */ ++ "SIGWINCH", /* 28 */ ++ "SIGIO", /* 29 */ ++ "SIGPWR", /* 30 */ ++ "SIGSYS", /* 31 */ +Index: ltrace/sysdeps/linux-gnu/aarch64/syscallent.h +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ ltrace/sysdeps/linux-gnu/aarch64/syscallent.h 2014-03-12 16:20:44.016676607 -0600 +@@ -0,0 +1,1100 @@ ++/* ++ * This file is part of ltrace. ++ * Copyright (C) 2014 Petr Machata, Red Hat, Inc. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++ ++ "io_setup", /* 0 */ ++ "io_destroy", /* 1 */ ++ "io_submit", /* 2 */ ++ "io_cancel", /* 3 */ ++ "io_getevents", /* 4 */ ++ "setxattr", /* 5 */ ++ "lsetxattr", /* 6 */ ++ "fsetxattr", /* 7 */ ++ "getxattr", /* 8 */ ++ "lgetxattr", /* 9 */ ++ "fgetxattr", /* 10 */ ++ "listxattr", /* 11 */ ++ "llistxattr", /* 12 */ ++ "flistxattr", /* 13 */ ++ "removexattr", /* 14 */ ++ "lremovexattr", /* 15 */ ++ "fremovexattr", /* 16 */ ++ "getcwd", /* 17 */ ++ "lookup_dcookie", /* 18 */ ++ "eventfd2", /* 19 */ ++ "epoll_create1", /* 20 */ ++ "epoll_ctl", /* 21 */ ++ "epoll_pwait", /* 22 */ ++ "dup", /* 23 */ ++ "dup3", /* 24 */ ++ "fcntl", /* 25 */ ++ "inotify_init1", /* 26 */ ++ "inotify_add_watch", /* 27 */ ++ "inotify_rm_watch", /* 28 */ ++ "ioctl", /* 29 */ ++ "ioprio_set", /* 30 */ ++ "ioprio_get", /* 31 */ ++ "flock", /* 32 */ ++ "mknodat", /* 33 */ ++ "mkdirat", /* 34 */ ++ "unlinkat", /* 35 */ ++ "symlinkat", /* 36 */ ++ "linkat", /* 37 */ ++ "renameat", /* 38 */ ++ "umount2", /* 39 */ ++ "mount", /* 40 */ ++ "pivot_root", /* 41 */ ++ "nfsservctl", /* 42 */ ++ "statfs", /* 43 */ ++ "fstatfs", /* 44 */ ++ "truncate", /* 45 */ ++ "ftruncate", /* 46 */ ++ "fallocate", /* 47 */ ++ "faccessat", /* 48 */ ++ "chdir", /* 49 */ ++ "fchdir", /* 50 */ ++ "chroot", /* 51 */ ++ "fchmod", /* 52 */ ++ "fchmodat", /* 53 */ ++ "fchownat", /* 54 */ ++ "fchown", /* 55 */ ++ "openat", /* 56 */ ++ "close", /* 57 */ ++ "vhangup", /* 58 */ ++ "pipe2", /* 59 */ ++ "quotactl", /* 60 */ ++ "getdents64", /* 61 */ ++ "lseek", /* 62 */ ++ "read", /* 63 */ ++ "write", /* 64 */ ++ "readv", /* 65 */ ++ "writev", /* 66 */ ++ "pread64", /* 67 */ ++ "pwrite64", /* 68 */ ++ "preadv", /* 69 */ ++ "pwritev", /* 70 */ ++ "sendfile", /* 71 */ ++ "pselect6", /* 72 */ ++ "ppoll", /* 73 */ ++ "signalfd4", /* 74 */ ++ "vmsplice", /* 75 */ ++ "splice", /* 76 */ ++ "tee", /* 77 */ ++ "readlinkat", /* 78 */ ++ "fstatat", /* 79 */ ++ "fstat", /* 80 */ ++ "sync", /* 81 */ ++ "fsync", /* 82 */ ++ "fdatasync", /* 83 */ ++ "sync_file_range", /* 84 */ ++ "timerfd_create", /* 85 */ ++ "timerfd_settime", /* 86 */ ++ "timerfd_gettime", /* 87 */ ++ "utimensat", /* 88 */ ++ "acct", /* 89 */ ++ "capget", /* 90 */ ++ "capset", /* 91 */ ++ "personality", /* 92 */ ++ "exit", /* 93 */ ++ "exit_group", /* 94 */ ++ "waitid", /* 95 */ ++ "set_tid_address", /* 96 */ ++ "unshare", /* 97 */ ++ "futex", /* 98 */ ++ "set_robust_list", /* 99 */ ++ "get_robust_list", /* 100 */ ++ "nanosleep", /* 101 */ ++ "getitimer", /* 102 */ ++ "setitimer", /* 103 */ ++ "kexec_load", /* 104 */ ++ "init_module", /* 105 */ ++ "delete_module", /* 106 */ ++ "timer_create", /* 107 */ ++ "timer_gettime", /* 108 */ ++ "timer_getoverrun", /* 109 */ ++ "timer_settime", /* 110 */ ++ "timer_delete", /* 111 */ ++ "clock_settime", /* 112 */ ++ "clock_gettime", /* 113 */ ++ "clock_getres", /* 114 */ ++ "clock_nanosleep", /* 115 */ ++ "syslog", /* 116 */ ++ "ptrace", /* 117 */ ++ "sched_setparam", /* 118 */ ++ "sched_setscheduler", /* 119 */ ++ "sched_getscheduler", /* 120 */ ++ "sched_getparam", /* 121 */ ++ "sched_setaffinity", /* 122 */ ++ "sched_getaffinity", /* 123 */ ++ "sched_yield", /* 124 */ ++ "sched_get_priority_max", /* 125 */ ++ "sched_get_priority_min", /* 126 */ ++ "sched_rr_get_interval", /* 127 */ ++ "restart_syscall", /* 128 */ ++ "kill", /* 129 */ 
++ "tkill", /* 130 */ ++ "tgkill", /* 131 */ ++ "sigaltstack", /* 132 */ ++ "rt_sigsuspend", /* 133 */ ++ "rt_sigaction", /* 134 */ ++ "rt_sigprocmask", /* 135 */ ++ "rt_sigpending", /* 136 */ ++ "rt_sigtimedwait", /* 137 */ ++ "rt_sigqueueinfo", /* 138 */ ++ "rt_sigreturn", /* 139 */ ++ "setpriority", /* 140 */ ++ "getpriority", /* 141 */ ++ "reboot", /* 142 */ ++ "setregid", /* 143 */ ++ "setgid", /* 144 */ ++ "setreuid", /* 145 */ ++ "setuid", /* 146 */ ++ "setresuid", /* 147 */ ++ "getresuid", /* 148 */ ++ "setresgid", /* 149 */ ++ "getresgid", /* 150 */ ++ "setfsuid", /* 151 */ ++ "setfsgid", /* 152 */ ++ "times", /* 153 */ ++ "setpgid", /* 154 */ ++ "getpgid", /* 155 */ ++ "getsid", /* 156 */ ++ "setsid", /* 157 */ ++ "getgroups", /* 158 */ ++ "setgroups", /* 159 */ ++ "uname", /* 160 */ ++ "sethostname", /* 161 */ ++ "setdomainname", /* 162 */ ++ "getrlimit", /* 163 */ ++ "setrlimit", /* 164 */ ++ "getrusage", /* 165 */ ++ "umask", /* 166 */ ++ "prctl", /* 167 */ ++ "getcpu", /* 168 */ ++ "gettimeofday", /* 169 */ ++ "settimeofday", /* 170 */ ++ "adjtimex", /* 171 */ ++ "getpid", /* 172 */ ++ "getppid", /* 173 */ ++ "getuid", /* 174 */ ++ "geteuid", /* 175 */ ++ "getgid", /* 176 */ ++ "getegid", /* 177 */ ++ "gettid", /* 178 */ ++ "sysinfo", /* 179 */ ++ "mq_open", /* 180 */ ++ "mq_unlink", /* 181 */ ++ "mq_timedsend", /* 182 */ ++ "mq_timedreceive", /* 183 */ ++ "mq_notify", /* 184 */ ++ "mq_getsetattr", /* 185 */ ++ "msgget", /* 186 */ ++ "msgctl", /* 187 */ ++ "msgrcv", /* 188 */ ++ "msgsnd", /* 189 */ ++ "semget", /* 190 */ ++ "semctl", /* 191 */ ++ "semtimedop", /* 192 */ ++ "semop", /* 193 */ ++ "shmget", /* 194 */ ++ "shmctl", /* 195 */ ++ "shmat", /* 196 */ ++ "shmdt", /* 197 */ ++ "socket", /* 198 */ ++ "socketpair", /* 199 */ ++ "bind", /* 200 */ ++ "listen", /* 201 */ ++ "accept", /* 202 */ ++ "connect", /* 203 */ ++ "getsockname", /* 204 */ ++ "getpeername", /* 205 */ ++ "sendto", /* 206 */ ++ "recvfrom", /* 207 */ ++ "setsockopt", /* 208 */ ++ "getsockopt", /* 209 */ ++ "shutdown", /* 210 */ ++ "sendmsg", /* 211 */ ++ "recvmsg", /* 212 */ ++ "readahead", /* 213 */ ++ "brk", /* 214 */ ++ "munmap", /* 215 */ ++ "mremap", /* 216 */ ++ "add_key", /* 217 */ ++ "request_key", /* 218 */ ++ "keyctl", /* 219 */ ++ "clone", /* 220 */ ++ "execve", /* 221 */ ++ "mmap", /* 222 */ ++ "fadvise64", /* 223 */ ++ "swapon", /* 224 */ ++ "swapoff", /* 225 */ ++ "mprotect", /* 226 */ ++ "msync", /* 227 */ ++ "mlock", /* 228 */ ++ "munlock", /* 229 */ ++ "mlockall", /* 230 */ ++ "munlockall", /* 231 */ ++ "mincore", /* 232 */ ++ "madvise", /* 233 */ ++ "remap_file_pages", /* 234 */ ++ "mbind", /* 235 */ ++ "get_mempolicy", /* 236 */ ++ "set_mempolicy", /* 237 */ ++ "migrate_pages", /* 238 */ ++ "move_pages", /* 239 */ ++ "rt_tgsigqueueinfo", /* 240 */ ++ "perf_event_open", /* 241 */ ++ "accept4", /* 242 */ ++ "recvmmsg", /* 243 */ ++ "arch_specific_syscall", /* 244 */ ++ "245", /* 245 */ ++ "246", /* 246 */ ++ "247", /* 247 */ ++ "248", /* 248 */ ++ "249", /* 249 */ ++ "250", /* 250 */ ++ "251", /* 251 */ ++ "252", /* 252 */ ++ "253", /* 253 */ ++ "254", /* 254 */ ++ "255", /* 255 */ ++ "256", /* 256 */ ++ "257", /* 257 */ ++ "258", /* 258 */ ++ "259", /* 259 */ ++ "wait4", /* 260 */ ++ "prlimit64", /* 261 */ ++ "fanotify_init", /* 262 */ ++ "fanotify_mark", /* 263 */ ++ "name_to_handle_at", /* 264 */ ++ "open_by_handle_at", /* 265 */ ++ "clock_adjtime", /* 266 */ ++ "syncfs", /* 267 */ ++ "setns", /* 268 */ ++ "sendmmsg", /* 269 */ ++ "process_vm_readv", /* 270 */ ++ "process_vm_writev", /* 
271 */ ++ "kcmp", /* 272 */ ++ "finit_module", /* 273 */ ++ "syscalls", /* 274 */ ++ "275", /* 275 */ ++ "276", /* 276 */ ++ "277", /* 277 */ ++ "278", /* 278 */ ++ "279", /* 279 */ ++ "280", /* 280 */ ++ "281", /* 281 */ ++ "282", /* 282 */ ++ "283", /* 283 */ ++ "284", /* 284 */ ++ "285", /* 285 */ ++ "286", /* 286 */ ++ "287", /* 287 */ ++ "288", /* 288 */ ++ "289", /* 289 */ ++ "290", /* 290 */ ++ "291", /* 291 */ ++ "292", /* 292 */ ++ "293", /* 293 */ ++ "294", /* 294 */ ++ "295", /* 295 */ ++ "296", /* 296 */ ++ "297", /* 297 */ ++ "298", /* 298 */ ++ "299", /* 299 */ ++ "300", /* 300 */ ++ "301", /* 301 */ ++ "302", /* 302 */ ++ "303", /* 303 */ ++ "304", /* 304 */ ++ "305", /* 305 */ ++ "306", /* 306 */ ++ "307", /* 307 */ ++ "308", /* 308 */ ++ "309", /* 309 */ ++ "310", /* 310 */ ++ "311", /* 311 */ ++ "312", /* 312 */ ++ "313", /* 313 */ ++ "314", /* 314 */ ++ "315", /* 315 */ ++ "316", /* 316 */ ++ "317", /* 317 */ ++ "318", /* 318 */ ++ "319", /* 319 */ ++ "320", /* 320 */ ++ "321", /* 321 */ ++ "322", /* 322 */ ++ "323", /* 323 */ ++ "324", /* 324 */ ++ "325", /* 325 */ ++ "326", /* 326 */ ++ "327", /* 327 */ ++ "328", /* 328 */ ++ "329", /* 329 */ ++ "330", /* 330 */ ++ "331", /* 331 */ ++ "332", /* 332 */ ++ "333", /* 333 */ ++ "334", /* 334 */ ++ "335", /* 335 */ ++ "336", /* 336 */ ++ "337", /* 337 */ ++ "338", /* 338 */ ++ "339", /* 339 */ ++ "340", /* 340 */ ++ "341", /* 341 */ ++ "342", /* 342 */ ++ "343", /* 343 */ ++ "344", /* 344 */ ++ "345", /* 345 */ ++ "346", /* 346 */ ++ "347", /* 347 */ ++ "348", /* 348 */ ++ "349", /* 349 */ ++ "350", /* 350 */ ++ "351", /* 351 */ ++ "352", /* 352 */ ++ "353", /* 353 */ ++ "354", /* 354 */ ++ "355", /* 355 */ ++ "356", /* 356 */ ++ "357", /* 357 */ ++ "358", /* 358 */ ++ "359", /* 359 */ ++ "360", /* 360 */ ++ "361", /* 361 */ ++ "362", /* 362 */ ++ "363", /* 363 */ ++ "364", /* 364 */ ++ "365", /* 365 */ ++ "366", /* 366 */ ++ "367", /* 367 */ ++ "368", /* 368 */ ++ "369", /* 369 */ ++ "370", /* 370 */ ++ "371", /* 371 */ ++ "372", /* 372 */ ++ "373", /* 373 */ ++ "374", /* 374 */ ++ "375", /* 375 */ ++ "376", /* 376 */ ++ "377", /* 377 */ ++ "378", /* 378 */ ++ "379", /* 379 */ ++ "380", /* 380 */ ++ "381", /* 381 */ ++ "382", /* 382 */ ++ "383", /* 383 */ ++ "384", /* 384 */ ++ "385", /* 385 */ ++ "386", /* 386 */ ++ "387", /* 387 */ ++ "388", /* 388 */ ++ "389", /* 389 */ ++ "390", /* 390 */ ++ "391", /* 391 */ ++ "392", /* 392 */ ++ "393", /* 393 */ ++ "394", /* 394 */ ++ "395", /* 395 */ ++ "396", /* 396 */ ++ "397", /* 397 */ ++ "398", /* 398 */ ++ "399", /* 399 */ ++ "400", /* 400 */ ++ "401", /* 401 */ ++ "402", /* 402 */ ++ "403", /* 403 */ ++ "404", /* 404 */ ++ "405", /* 405 */ ++ "406", /* 406 */ ++ "407", /* 407 */ ++ "408", /* 408 */ ++ "409", /* 409 */ ++ "410", /* 410 */ ++ "411", /* 411 */ ++ "412", /* 412 */ ++ "413", /* 413 */ ++ "414", /* 414 */ ++ "415", /* 415 */ ++ "416", /* 416 */ ++ "417", /* 417 */ ++ "418", /* 418 */ ++ "419", /* 419 */ ++ "420", /* 420 */ ++ "421", /* 421 */ ++ "422", /* 422 */ ++ "423", /* 423 */ ++ "424", /* 424 */ ++ "425", /* 425 */ ++ "426", /* 426 */ ++ "427", /* 427 */ ++ "428", /* 428 */ ++ "429", /* 429 */ ++ "430", /* 430 */ ++ "431", /* 431 */ ++ "432", /* 432 */ ++ "433", /* 433 */ ++ "434", /* 434 */ ++ "435", /* 435 */ ++ "436", /* 436 */ ++ "437", /* 437 */ ++ "438", /* 438 */ ++ "439", /* 439 */ ++ "440", /* 440 */ ++ "441", /* 441 */ ++ "442", /* 442 */ ++ "443", /* 443 */ ++ "444", /* 444 */ ++ "445", /* 445 */ ++ "446", /* 446 */ ++ "447", /* 447 */ ++ "448", /* 
448 */ ++ "449", /* 449 */ ++ "450", /* 450 */ ++ "451", /* 451 */ ++ "452", /* 452 */ ++ "453", /* 453 */ ++ "454", /* 454 */ ++ "455", /* 455 */ ++ "456", /* 456 */ ++ "457", /* 457 */ ++ "458", /* 458 */ ++ "459", /* 459 */ ++ "460", /* 460 */ ++ "461", /* 461 */ ++ "462", /* 462 */ ++ "463", /* 463 */ ++ "464", /* 464 */ ++ "465", /* 465 */ ++ "466", /* 466 */ ++ "467", /* 467 */ ++ "468", /* 468 */ ++ "469", /* 469 */ ++ "470", /* 470 */ ++ "471", /* 471 */ ++ "472", /* 472 */ ++ "473", /* 473 */ ++ "474", /* 474 */ ++ "475", /* 475 */ ++ "476", /* 476 */ ++ "477", /* 477 */ ++ "478", /* 478 */ ++ "479", /* 479 */ ++ "480", /* 480 */ ++ "481", /* 481 */ ++ "482", /* 482 */ ++ "483", /* 483 */ ++ "484", /* 484 */ ++ "485", /* 485 */ ++ "486", /* 486 */ ++ "487", /* 487 */ ++ "488", /* 488 */ ++ "489", /* 489 */ ++ "490", /* 490 */ ++ "491", /* 491 */ ++ "492", /* 492 */ ++ "493", /* 493 */ ++ "494", /* 494 */ ++ "495", /* 495 */ ++ "496", /* 496 */ ++ "497", /* 497 */ ++ "498", /* 498 */ ++ "499", /* 499 */ ++ "500", /* 500 */ ++ "501", /* 501 */ ++ "502", /* 502 */ ++ "503", /* 503 */ ++ "504", /* 504 */ ++ "505", /* 505 */ ++ "506", /* 506 */ ++ "507", /* 507 */ ++ "508", /* 508 */ ++ "509", /* 509 */ ++ "510", /* 510 */ ++ "511", /* 511 */ ++ "512", /* 512 */ ++ "513", /* 513 */ ++ "514", /* 514 */ ++ "515", /* 515 */ ++ "516", /* 516 */ ++ "517", /* 517 */ ++ "518", /* 518 */ ++ "519", /* 519 */ ++ "520", /* 520 */ ++ "521", /* 521 */ ++ "522", /* 522 */ ++ "523", /* 523 */ ++ "524", /* 524 */ ++ "525", /* 525 */ ++ "526", /* 526 */ ++ "527", /* 527 */ ++ "528", /* 528 */ ++ "529", /* 529 */ ++ "530", /* 530 */ ++ "531", /* 531 */ ++ "532", /* 532 */ ++ "533", /* 533 */ ++ "534", /* 534 */ ++ "535", /* 535 */ ++ "536", /* 536 */ ++ "537", /* 537 */ ++ "538", /* 538 */ ++ "539", /* 539 */ ++ "540", /* 540 */ ++ "541", /* 541 */ ++ "542", /* 542 */ ++ "543", /* 543 */ ++ "544", /* 544 */ ++ "545", /* 545 */ ++ "546", /* 546 */ ++ "547", /* 547 */ ++ "548", /* 548 */ ++ "549", /* 549 */ ++ "550", /* 550 */ ++ "551", /* 551 */ ++ "552", /* 552 */ ++ "553", /* 553 */ ++ "554", /* 554 */ ++ "555", /* 555 */ ++ "556", /* 556 */ ++ "557", /* 557 */ ++ "558", /* 558 */ ++ "559", /* 559 */ ++ "560", /* 560 */ ++ "561", /* 561 */ ++ "562", /* 562 */ ++ "563", /* 563 */ ++ "564", /* 564 */ ++ "565", /* 565 */ ++ "566", /* 566 */ ++ "567", /* 567 */ ++ "568", /* 568 */ ++ "569", /* 569 */ ++ "570", /* 570 */ ++ "571", /* 571 */ ++ "572", /* 572 */ ++ "573", /* 573 */ ++ "574", /* 574 */ ++ "575", /* 575 */ ++ "576", /* 576 */ ++ "577", /* 577 */ ++ "578", /* 578 */ ++ "579", /* 579 */ ++ "580", /* 580 */ ++ "581", /* 581 */ ++ "582", /* 582 */ ++ "583", /* 583 */ ++ "584", /* 584 */ ++ "585", /* 585 */ ++ "586", /* 586 */ ++ "587", /* 587 */ ++ "588", /* 588 */ ++ "589", /* 589 */ ++ "590", /* 590 */ ++ "591", /* 591 */ ++ "592", /* 592 */ ++ "593", /* 593 */ ++ "594", /* 594 */ ++ "595", /* 595 */ ++ "596", /* 596 */ ++ "597", /* 597 */ ++ "598", /* 598 */ ++ "599", /* 599 */ ++ "600", /* 600 */ ++ "601", /* 601 */ ++ "602", /* 602 */ ++ "603", /* 603 */ ++ "604", /* 604 */ ++ "605", /* 605 */ ++ "606", /* 606 */ ++ "607", /* 607 */ ++ "608", /* 608 */ ++ "609", /* 609 */ ++ "610", /* 610 */ ++ "611", /* 611 */ ++ "612", /* 612 */ ++ "613", /* 613 */ ++ "614", /* 614 */ ++ "615", /* 615 */ ++ "616", /* 616 */ ++ "617", /* 617 */ ++ "618", /* 618 */ ++ "619", /* 619 */ ++ "620", /* 620 */ ++ "621", /* 621 */ ++ "622", /* 622 */ ++ "623", /* 623 */ ++ "624", /* 624 */ ++ "625", /* 625 */ ++ 
"626", /* 626 */ ++ "627", /* 627 */ ++ "628", /* 628 */ ++ "629", /* 629 */ ++ "630", /* 630 */ ++ "631", /* 631 */ ++ "632", /* 632 */ ++ "633", /* 633 */ ++ "634", /* 634 */ ++ "635", /* 635 */ ++ "636", /* 636 */ ++ "637", /* 637 */ ++ "638", /* 638 */ ++ "639", /* 639 */ ++ "640", /* 640 */ ++ "641", /* 641 */ ++ "642", /* 642 */ ++ "643", /* 643 */ ++ "644", /* 644 */ ++ "645", /* 645 */ ++ "646", /* 646 */ ++ "647", /* 647 */ ++ "648", /* 648 */ ++ "649", /* 649 */ ++ "650", /* 650 */ ++ "651", /* 651 */ ++ "652", /* 652 */ ++ "653", /* 653 */ ++ "654", /* 654 */ ++ "655", /* 655 */ ++ "656", /* 656 */ ++ "657", /* 657 */ ++ "658", /* 658 */ ++ "659", /* 659 */ ++ "660", /* 660 */ ++ "661", /* 661 */ ++ "662", /* 662 */ ++ "663", /* 663 */ ++ "664", /* 664 */ ++ "665", /* 665 */ ++ "666", /* 666 */ ++ "667", /* 667 */ ++ "668", /* 668 */ ++ "669", /* 669 */ ++ "670", /* 670 */ ++ "671", /* 671 */ ++ "672", /* 672 */ ++ "673", /* 673 */ ++ "674", /* 674 */ ++ "675", /* 675 */ ++ "676", /* 676 */ ++ "677", /* 677 */ ++ "678", /* 678 */ ++ "679", /* 679 */ ++ "680", /* 680 */ ++ "681", /* 681 */ ++ "682", /* 682 */ ++ "683", /* 683 */ ++ "684", /* 684 */ ++ "685", /* 685 */ ++ "686", /* 686 */ ++ "687", /* 687 */ ++ "688", /* 688 */ ++ "689", /* 689 */ ++ "690", /* 690 */ ++ "691", /* 691 */ ++ "692", /* 692 */ ++ "693", /* 693 */ ++ "694", /* 694 */ ++ "695", /* 695 */ ++ "696", /* 696 */ ++ "697", /* 697 */ ++ "698", /* 698 */ ++ "699", /* 699 */ ++ "700", /* 700 */ ++ "701", /* 701 */ ++ "702", /* 702 */ ++ "703", /* 703 */ ++ "704", /* 704 */ ++ "705", /* 705 */ ++ "706", /* 706 */ ++ "707", /* 707 */ ++ "708", /* 708 */ ++ "709", /* 709 */ ++ "710", /* 710 */ ++ "711", /* 711 */ ++ "712", /* 712 */ ++ "713", /* 713 */ ++ "714", /* 714 */ ++ "715", /* 715 */ ++ "716", /* 716 */ ++ "717", /* 717 */ ++ "718", /* 718 */ ++ "719", /* 719 */ ++ "720", /* 720 */ ++ "721", /* 721 */ ++ "722", /* 722 */ ++ "723", /* 723 */ ++ "724", /* 724 */ ++ "725", /* 725 */ ++ "726", /* 726 */ ++ "727", /* 727 */ ++ "728", /* 728 */ ++ "729", /* 729 */ ++ "730", /* 730 */ ++ "731", /* 731 */ ++ "732", /* 732 */ ++ "733", /* 733 */ ++ "734", /* 734 */ ++ "735", /* 735 */ ++ "736", /* 736 */ ++ "737", /* 737 */ ++ "738", /* 738 */ ++ "739", /* 739 */ ++ "740", /* 740 */ ++ "741", /* 741 */ ++ "742", /* 742 */ ++ "743", /* 743 */ ++ "744", /* 744 */ ++ "745", /* 745 */ ++ "746", /* 746 */ ++ "747", /* 747 */ ++ "748", /* 748 */ ++ "749", /* 749 */ ++ "750", /* 750 */ ++ "751", /* 751 */ ++ "752", /* 752 */ ++ "753", /* 753 */ ++ "754", /* 754 */ ++ "755", /* 755 */ ++ "756", /* 756 */ ++ "757", /* 757 */ ++ "758", /* 758 */ ++ "759", /* 759 */ ++ "760", /* 760 */ ++ "761", /* 761 */ ++ "762", /* 762 */ ++ "763", /* 763 */ ++ "764", /* 764 */ ++ "765", /* 765 */ ++ "766", /* 766 */ ++ "767", /* 767 */ ++ "768", /* 768 */ ++ "769", /* 769 */ ++ "770", /* 770 */ ++ "771", /* 771 */ ++ "772", /* 772 */ ++ "773", /* 773 */ ++ "774", /* 774 */ ++ "775", /* 775 */ ++ "776", /* 776 */ ++ "777", /* 777 */ ++ "778", /* 778 */ ++ "779", /* 779 */ ++ "780", /* 780 */ ++ "781", /* 781 */ ++ "782", /* 782 */ ++ "783", /* 783 */ ++ "784", /* 784 */ ++ "785", /* 785 */ ++ "786", /* 786 */ ++ "787", /* 787 */ ++ "788", /* 788 */ ++ "789", /* 789 */ ++ "790", /* 790 */ ++ "791", /* 791 */ ++ "792", /* 792 */ ++ "793", /* 793 */ ++ "794", /* 794 */ ++ "795", /* 795 */ ++ "796", /* 796 */ ++ "797", /* 797 */ ++ "798", /* 798 */ ++ "799", /* 799 */ ++ "800", /* 800 */ ++ "801", /* 801 */ ++ "802", /* 802 */ ++ "803", /* 803 
*/ ++ "804", /* 804 */ ++ "805", /* 805 */ ++ "806", /* 806 */ ++ "807", /* 807 */ ++ "808", /* 808 */ ++ "809", /* 809 */ ++ "810", /* 810 */ ++ "811", /* 811 */ ++ "812", /* 812 */ ++ "813", /* 813 */ ++ "814", /* 814 */ ++ "815", /* 815 */ ++ "816", /* 816 */ ++ "817", /* 817 */ ++ "818", /* 818 */ ++ "819", /* 819 */ ++ "820", /* 820 */ ++ "821", /* 821 */ ++ "822", /* 822 */ ++ "823", /* 823 */ ++ "824", /* 824 */ ++ "825", /* 825 */ ++ "826", /* 826 */ ++ "827", /* 827 */ ++ "828", /* 828 */ ++ "829", /* 829 */ ++ "830", /* 830 */ ++ "831", /* 831 */ ++ "832", /* 832 */ ++ "833", /* 833 */ ++ "834", /* 834 */ ++ "835", /* 835 */ ++ "836", /* 836 */ ++ "837", /* 837 */ ++ "838", /* 838 */ ++ "839", /* 839 */ ++ "840", /* 840 */ ++ "841", /* 841 */ ++ "842", /* 842 */ ++ "843", /* 843 */ ++ "844", /* 844 */ ++ "845", /* 845 */ ++ "846", /* 846 */ ++ "847", /* 847 */ ++ "848", /* 848 */ ++ "849", /* 849 */ ++ "850", /* 850 */ ++ "851", /* 851 */ ++ "852", /* 852 */ ++ "853", /* 853 */ ++ "854", /* 854 */ ++ "855", /* 855 */ ++ "856", /* 856 */ ++ "857", /* 857 */ ++ "858", /* 858 */ ++ "859", /* 859 */ ++ "860", /* 860 */ ++ "861", /* 861 */ ++ "862", /* 862 */ ++ "863", /* 863 */ ++ "864", /* 864 */ ++ "865", /* 865 */ ++ "866", /* 866 */ ++ "867", /* 867 */ ++ "868", /* 868 */ ++ "869", /* 869 */ ++ "870", /* 870 */ ++ "871", /* 871 */ ++ "872", /* 872 */ ++ "873", /* 873 */ ++ "874", /* 874 */ ++ "875", /* 875 */ ++ "876", /* 876 */ ++ "877", /* 877 */ ++ "878", /* 878 */ ++ "879", /* 879 */ ++ "880", /* 880 */ ++ "881", /* 881 */ ++ "882", /* 882 */ ++ "883", /* 883 */ ++ "884", /* 884 */ ++ "885", /* 885 */ ++ "886", /* 886 */ ++ "887", /* 887 */ ++ "888", /* 888 */ ++ "889", /* 889 */ ++ "890", /* 890 */ ++ "891", /* 891 */ ++ "892", /* 892 */ ++ "893", /* 893 */ ++ "894", /* 894 */ ++ "895", /* 895 */ ++ "896", /* 896 */ ++ "897", /* 897 */ ++ "898", /* 898 */ ++ "899", /* 899 */ ++ "900", /* 900 */ ++ "901", /* 901 */ ++ "902", /* 902 */ ++ "903", /* 903 */ ++ "904", /* 904 */ ++ "905", /* 905 */ ++ "906", /* 906 */ ++ "907", /* 907 */ ++ "908", /* 908 */ ++ "909", /* 909 */ ++ "910", /* 910 */ ++ "911", /* 911 */ ++ "912", /* 912 */ ++ "913", /* 913 */ ++ "914", /* 914 */ ++ "915", /* 915 */ ++ "916", /* 916 */ ++ "917", /* 917 */ ++ "918", /* 918 */ ++ "919", /* 919 */ ++ "920", /* 920 */ ++ "921", /* 921 */ ++ "922", /* 922 */ ++ "923", /* 923 */ ++ "924", /* 924 */ ++ "925", /* 925 */ ++ "926", /* 926 */ ++ "927", /* 927 */ ++ "928", /* 928 */ ++ "929", /* 929 */ ++ "930", /* 930 */ ++ "931", /* 931 */ ++ "932", /* 932 */ ++ "933", /* 933 */ ++ "934", /* 934 */ ++ "935", /* 935 */ ++ "936", /* 936 */ ++ "937", /* 937 */ ++ "938", /* 938 */ ++ "939", /* 939 */ ++ "940", /* 940 */ ++ "941", /* 941 */ ++ "942", /* 942 */ ++ "943", /* 943 */ ++ "944", /* 944 */ ++ "945", /* 945 */ ++ "946", /* 946 */ ++ "947", /* 947 */ ++ "948", /* 948 */ ++ "949", /* 949 */ ++ "950", /* 950 */ ++ "951", /* 951 */ ++ "952", /* 952 */ ++ "953", /* 953 */ ++ "954", /* 954 */ ++ "955", /* 955 */ ++ "956", /* 956 */ ++ "957", /* 957 */ ++ "958", /* 958 */ ++ "959", /* 959 */ ++ "960", /* 960 */ ++ "961", /* 961 */ ++ "962", /* 962 */ ++ "963", /* 963 */ ++ "964", /* 964 */ ++ "965", /* 965 */ ++ "966", /* 966 */ ++ "967", /* 967 */ ++ "968", /* 968 */ ++ "969", /* 969 */ ++ "970", /* 970 */ ++ "971", /* 971 */ ++ "972", /* 972 */ ++ "973", /* 973 */ ++ "974", /* 974 */ ++ "975", /* 975 */ ++ "976", /* 976 */ ++ "977", /* 977 */ ++ "978", /* 978 */ ++ "979", /* 979 */ ++ "980", /* 980 */ ++ "981", 
/* 981 */ ++ "982", /* 982 */ ++ "983", /* 983 */ ++ "984", /* 984 */ ++ "985", /* 985 */ ++ "986", /* 986 */ ++ "987", /* 987 */ ++ "988", /* 988 */ ++ "989", /* 989 */ ++ "990", /* 990 */ ++ "991", /* 991 */ ++ "992", /* 992 */ ++ "993", /* 993 */ ++ "994", /* 994 */ ++ "995", /* 995 */ ++ "996", /* 996 */ ++ "997", /* 997 */ ++ "998", /* 998 */ ++ "999", /* 999 */ ++ "1000", /* 1000 */ ++ "1001", /* 1001 */ ++ "1002", /* 1002 */ ++ "1003", /* 1003 */ ++ "1004", /* 1004 */ ++ "1005", /* 1005 */ ++ "1006", /* 1006 */ ++ "1007", /* 1007 */ ++ "1008", /* 1008 */ ++ "1009", /* 1009 */ ++ "1010", /* 1010 */ ++ "1011", /* 1011 */ ++ "1012", /* 1012 */ ++ "1013", /* 1013 */ ++ "1014", /* 1014 */ ++ "1015", /* 1015 */ ++ "1016", /* 1016 */ ++ "1017", /* 1017 */ ++ "1018", /* 1018 */ ++ "1019", /* 1019 */ ++ "1020", /* 1020 */ ++ "1021", /* 1021 */ ++ "1022", /* 1022 */ ++ "1023", /* 1023 */ ++ "open", /* 1024 */ ++ "link", /* 1025 */ ++ "unlink", /* 1026 */ ++ "mknod", /* 1027 */ ++ "chmod", /* 1028 */ ++ "chown", /* 1029 */ ++ "mkdir", /* 1030 */ ++ "rmdir", /* 1031 */ ++ "lchown", /* 1032 */ ++ "access", /* 1033 */ ++ "rename", /* 1034 */ ++ "readlink", /* 1035 */ ++ "symlink", /* 1036 */ ++ "utimes", /* 1037 */ ++ "stat", /* 1038 */ ++ "lstat", /* 1039 */ ++ "pipe", /* 1040 */ ++ "dup2", /* 1041 */ ++ "epoll_create", /* 1042 */ ++ "inotify_init", /* 1043 */ ++ "eventfd", /* 1044 */ ++ "signalfd", /* 1045 */ ++ "sendfile", /* 1046 */ ++ "ftruncate", /* 1047 */ ++ "truncate", /* 1048 */ ++ "stat", /* 1049 */ ++ "lstat", /* 1050 */ ++ "fstat", /* 1051 */ ++ "fcntl", /* 1052 */ ++ "fadvise64", /* 1053 */ ++ "newfstatat", /* 1054 */ ++ "fstatfs", /* 1055 */ ++ "statfs", /* 1056 */ ++ "lseek", /* 1057 */ ++ "mmap", /* 1058 */ ++ "alarm", /* 1059 */ ++ "getpgrp", /* 1060 */ ++ "pause", /* 1061 */ ++ "time", /* 1062 */ ++ "utime", /* 1063 */ ++ "creat", /* 1064 */ ++ "getdents", /* 1065 */ ++ "futimesat", /* 1066 */ ++ "select", /* 1067 */ ++ "poll", /* 1068 */ ++ "epoll_wait", /* 1069 */ ++ "ustat", /* 1070 */ ++ "vfork", /* 1071 */ ++ "oldwait4", /* 1072 */ ++ "recv", /* 1073 */ ++ "send", /* 1074 */ ++ "bdflush", /* 1075 */ ++ "umount", /* 1076 */ ++ "uselib", /* 1077 */ ++ "_sysctl", /* 1078 */ ++ "fork", /* 1079 */ +Index: ltrace/sysdeps/linux-gnu/aarch64/trace.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ ltrace/sysdeps/linux-gnu/aarch64/trace.c 2014-03-12 16:23:25.382866486 -0600 +@@ -0,0 +1,83 @@ ++/* ++ * This file is part of ltrace. ++ * Copyright (C) 2014 Petr Machata, Red Hat, Inc. ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation; either version 2 of the ++ * License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA ++ * 02110-1301 USA ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "backend.h" ++#include "proc.h" ++ ++void ++get_arch_dep(struct Process *proc) ++{ ++} ++ ++int aarch64_read_gregs(struct Process *proc, struct user_pt_regs *regs); ++ ++/* The syscall instruction is: ++ * | 31 21 | 20 5 | 4 0 | ++ * | 1 1 0 1 0 1 0 0 | 0 0 0 | imm16 | 0 0 0 0 1 | */ ++#define SVC_MASK 0xffe0001f ++#define SVC_VALUE 0xd4000001 ++ ++int ++syscall_p(struct Process *proc, int status, int *sysnum) ++{ ++ if (WIFSTOPPED(status) ++ && WSTOPSIG(status) == (SIGTRAP | proc->tracesysgood)) { ++ ++ struct user_pt_regs regs; ++ if (aarch64_read_gregs(proc, ®s) < 0) { ++ fprintf(stderr, "syscall_p: " ++ "Couldn't read registers of %d.\n", proc->pid); ++ return -1; ++ } ++ ++ errno = 0; ++ unsigned long insn = (unsigned long) ptrace(PTRACE_PEEKTEXT, ++ proc->pid, ++ regs.pc - 4, 0); ++ if (insn == -1UL && errno != 0) { ++ fprintf(stderr, "syscall_p: " ++ "Couldn't peek into %d: %s\n", proc->pid, ++ strerror(errno)); ++ return -1; ++ } ++ ++ insn &= 0xffffffffUL; ++ if ((insn & SVC_MASK) == SVC_VALUE) { ++ *sysnum = regs.regs[8]; ++ ++ size_t d1 = proc->callstack_depth - 1; ++ if (proc->callstack_depth > 0 ++ && proc->callstack[d1].is_syscall ++ && proc->callstack[d1].c_un.syscall == *sysnum) ++ return 2; ++ ++ return 1; ++ } ++ } ++ ++ return 0; ++} diff -Nru ltrace-0.7.3/debian/patches/jmp_irel.patch ltrace-0.7.3/debian/patches/jmp_irel.patch --- ltrace-0.7.3/debian/patches/jmp_irel.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/jmp_irel.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,449 @@ +From 73b85aadbf377541ac336914e5ff8ec521226a97 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Wed, 30 Oct 2013 00:10:29 +0100 +Subject: Support tracing P_PPC64_JMP_IREL slots + +--- + callback.h | 3 + sysdeps/linux-gnu/ppc/arch.h | 12 ++ + sysdeps/linux-gnu/ppc/plt.c | 234 ++++++++++++++++++++++++++++++++++--------- + 3 files changed, 201 insertions(+), 48 deletions(-) + +Index: b/sysdeps/linux-gnu/ppc/arch.h +=================================================================== +--- a/sysdeps/linux-gnu/ppc/arch.h ++++ b/sysdeps/linux-gnu/ppc/arch.h +@@ -56,6 +56,9 @@ struct arch_ltelf_data { + GElf_Addr opd_base; + GElf_Xword opd_size; + int secure_plt; ++ ++ Elf_Data *reladyn; ++ size_t reladyn_count; + }; + + #define ARCH_HAVE_LIBRARY_DATA +@@ -79,6 +82,10 @@ enum ppc64_plt_type { + * corresponding PLT entry. The original is now saved in + * RESOLVED_VALUE. */ + PPC_PLT_RESOLVED, ++ ++ /* Very similar to PPC_PLT_UNRESOLVED, but for JMP_IREL ++ * slots. */ ++ PPC_PLT_IRELATIVE, + }; + + #define ARCH_HAVE_LIBRARY_SYMBOL_DATA +@@ -92,7 +99,10 @@ struct arch_library_symbol_data { + + #define ARCH_HAVE_BREAKPOINT_DATA + struct arch_breakpoint_data { +- /* We need this just for arch_breakpoint_init. */ ++ /* This is where we hide symbol for IRELATIVE breakpoint for ++ * the first time that it hits. This is NULL for normal ++ * breakpoints. 
*/ ++ struct library_symbol *irel_libsym; + }; + + #define ARCH_HAVE_PROCESS_DATA +Index: b/sysdeps/linux-gnu/ppc/plt.c +=================================================================== +--- a/sysdeps/linux-gnu/ppc/plt.c ++++ b/sysdeps/linux-gnu/ppc/plt.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include + + #include "proc.h" +@@ -34,6 +35,8 @@ + #include "breakpoint.h" + #include "linux-gnu/trace.h" + #include "backend.h" ++#include "vect.h" ++#include "trace.h" + + /* There are two PLT types on 32-bit PPC: old-style, BSS PLT, and + * new-style "secure" PLT. We can tell one from the other by the +@@ -104,6 +107,21 @@ + * through half the dynamic linker, we just let the thread run and hit + * this breakpoint. When it hits, we know the PLT entry was resolved. + * ++ * Another twist comes from tracing slots corresponding to ++ * R_PPC64_JMP_IREL relocations. These have no dedicated PLT entry. ++ * The calls are done directly from stubs, and the .plt entry ++ * (actually .iplt entry, these live in a special section) is resolved ++ * in advance before the binary starts. Because there's no PLT entry, ++ * we put the PLT breakpoints directly to the IFUNC resolver code, and ++ * then would like them to behave like ordinary PLT slots, including ++ * catching the point where these get resolved to unresolve them. So ++ * for the first call (which is the actual resolver call), we pretend ++ * that this breakpoint is artificial and has no associated symbol, ++ * and turn it on fully only after the first hit. Ideally we would ++ * trace that first call as well, but then the stepper, which tries to ++ * catch the point where the slot is resolved, would hit the return ++ * breakpoint and that's not currently handled well. ++ * + * XXX TODO If we have hardware watch point, we might put a read watch + * on .plt slot, and discover the offenders this way. I don't know + * the details, but I assume at most a handful (like, one or two, if +@@ -177,10 +195,48 @@ mark_as_resolved(struct library_symbol * + libsym->arch.resolved_value = value; + } + ++static void ++ppc32_delayed_symbol(struct library_symbol *libsym) ++{ ++ /* arch_dynlink_done is called on attach as well. In that ++ * case some slots will have been resolved already. ++ * Unresolved PLT looks like this: ++ * ++ * : li r11,0 ++ * : b "resolve" ++ * ++ * "resolve" is another address in PLTGOT (the same block that ++ * all the PLT slots are it). When resolved, it looks either ++ * this way: ++ * ++ * : b 0xfea88d0 ++ * ++ * Which is easy to detect. It can also look this way: ++ * ++ * : li r11,0 ++ * : b "dispatch" ++ * ++ * The "dispatch" address lies in PLTGOT as well. In current ++ * GNU toolchain, "dispatch" address is the same as PLTGOT ++ * address. We rely on this to figure out whether the address ++ * is resolved or not. */ ++ ++ uint32_t insn1 = libsym->arch.resolved_value >> 32; ++ uint32_t insn2 = (uint32_t) libsym->arch.resolved_value; ++ if ((insn1 & BRANCH_MASK) == B_INSN ++ || ((insn2 & BRANCH_MASK) == B_INSN ++ /* XXX double cast */ ++ && (ppc_branch_dest(libsym->enter_addr + 4, insn2) ++ == (arch_addr_t) (long) libsym->lib->arch.pltgot_addr))) ++ { ++ mark_as_resolved(libsym, libsym->arch.resolved_value); ++ } ++} ++ + void + arch_dynlink_done(struct Process *proc) + { +- /* On PPC32 with BSS PLT, we need to enable delayed symbols. */ ++ /* We may need to activate delayed symbols. 
*/ + struct library_symbol *libsym = NULL; + while ((libsym = proc_each_symbol(proc, libsym, + library_symbol_delayed_cb, NULL))) { +@@ -193,47 +249,37 @@ arch_dynlink_done(struct Process *proc) + return; + } + +- /* arch_dynlink_done is called on attach as well. In +- * that case some slots will have been resolved +- * already. Unresolved PLT looks like this: +- * +- * : li r11,0 +- * : b "resolve" +- * +- * "resolve" is another address in PLTGOT (the same +- * block that all the PLT slots are it). When +- * resolved, it looks either this way: +- * +- * : b 0xfea88d0 +- * +- * Which is easy to detect. It can also look this +- * way: +- * +- * : li r11,0 +- * : b "dispatch" +- * +- * The "dispatch" address lies in PLTGOT as well. In +- * current GNU toolchain, "dispatch" address is the +- * same as PLTGOT address. We rely on this to figure +- * out whether the address is resolved or not. */ +- uint32_t insn1 = libsym->arch.resolved_value >> 32; +- uint32_t insn2 = (uint32_t)libsym->arch.resolved_value; +- if ((insn1 & BRANCH_MASK) == B_INSN +- || ((insn2 & BRANCH_MASK) == B_INSN +- /* XXX double cast */ +- && (ppc_branch_dest(libsym->enter_addr + 4, insn2) +- == (void*)(long)libsym->lib->arch.pltgot_addr))) +- mark_as_resolved(libsym, libsym->arch.resolved_value); ++ if (proc->e_machine == EM_PPC) ++ ppc32_delayed_symbol(libsym); + ++ fprintf(stderr, "activating %s\n", libsym->name); + if (proc_activate_delayed_symbol(proc, libsym) < 0) + return; + +- /* XXX double cast */ +- libsym->arch.plt_slot_addr +- = (GElf_Addr)(uintptr_t)libsym->enter_addr; ++ if (proc->e_machine == EM_PPC) ++ /* XXX double cast */ ++ libsym->arch.plt_slot_addr ++ = (GElf_Addr) (uintptr_t) libsym->enter_addr; + } + } + ++static bool ++reloc_is_irelative(int machine, GElf_Rela *rela) ++{ ++ bool irelative = false; ++ if (machine == EM_PPC64) { ++#ifdef R_PPC64_JMP_IREL ++ irelative = GELF_R_TYPE(rela->r_info) == R_PPC64_JMP_IREL; ++#endif ++ } else { ++ assert(machine == EM_PPC); ++#ifdef R_PPC_IRELATIVE ++ irelative = GELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE; ++#endif ++ } ++ return irelative; ++} ++ + GElf_Addr + arch_plt_sym_val(struct ltelf *lte, size_t ndx, GElf_Rela *rela) + { +@@ -244,10 +290,28 @@ arch_plt_sym_val(struct ltelf *lte, size + } else if (lte->ehdr.e_machine == EM_PPC) { + return rela->r_offset; + ++ /* Beyond this point, we are on PPC64, but don't have stub ++ * symbols. */ ++ ++ } else if (reloc_is_irelative(lte->ehdr.e_machine, rela)) { ++ ++ /* Put JMP_IREL breakpoint to resolver, since there's ++ * no dedicated PLT entry. */ ++ ++ assert(rela->r_addend != 0); ++ /* XXX double cast */ ++ arch_addr_t res_addr = (arch_addr_t) (uintptr_t) rela->r_addend; ++ if (arch_translate_address(lte, res_addr, &res_addr) < 0) { ++ fprintf(stderr, "Couldn't OPD-translate IRELATIVE " ++ "resolver address.\n"); ++ return 0; ++ } ++ /* XXX double cast */ ++ return (GElf_Addr) (uintptr_t) res_addr; ++ + } else { +- /* If we get here, we don't have stub symbols. In +- * that case we put brakpoints to PLT entries the same +- * as the PPC32 secure PLT case does. */ ++ /* We put brakpoints to PLT entries the same as the ++ * PPC32 secure PLT case does. 
*/ + assert(lte->arch.plt_stub_vma != 0); + return lte->arch.plt_stub_vma + PPC64_PLT_STUB_SIZE * ndx; + } +@@ -425,6 +489,15 @@ nonzero_data(Elf_Data *data) + return 0; + } + ++static enum callback_status ++reloc_copy_if_irelative(GElf_Rela *rela, void *data) ++{ ++ struct ltelf *lte = data; ++ ++ return CBS_STOP_IF(reloc_is_irelative(lte->ehdr.e_machine, rela) ++ && VECT_PUSHBACK(<e->plt_relocs, rela) < 0); ++} ++ + int + arch_elf_init(struct ltelf *lte, struct library *lib) + { +@@ -453,8 +526,7 @@ arch_elf_init(struct ltelf *lte, struct + } + GElf_Addr glink_vma = get_glink_vma(lte, ppcgot, lte->plt_data); + +- assert(lte->relplt_size % 12 == 0); +- size_t count = lte->relplt_size / 12; // size of RELA entry ++ size_t count = vect_size(<e->plt_relocs); + lte->arch.plt_stub_vma = glink_vma + - (GElf_Addr)count * PPC_PLT_STUB_SIZE; + debug(1, "stub_vma is %#" PRIx64, lte->arch.plt_stub_vma); +@@ -556,6 +628,35 @@ arch_elf_init(struct ltelf *lte, struct + } + } + ++ /* On PPC64, IRELATIVE relocations actually relocate .iplt ++ * section, and as such are stored in .rela.dyn (where all ++ * non-PLT relocations are stored) instead of .rela.plt. Add ++ * these to lte->plt_relocs. */ ++ extern int read_relplt(struct ltelf *lte, Elf_Scn *scn, GElf_Shdr *shdr, ++ struct vect *ret); ++ ++ GElf_Addr rela, relasz; ++ Elf_Scn *rela_sec; ++ GElf_Shdr rela_shdr; ++ if (lte->ehdr.e_machine == EM_PPC64 ++ && load_dynamic_entry(lte, DT_RELA, &rela) == 0 ++ && load_dynamic_entry(lte, DT_RELASZ, &relasz) == 0 ++ && elf_get_section_covering(lte, rela, &rela_sec, &rela_shdr) == 0 ++ && rela_sec != NULL) { ++ ++ struct vect v; ++ VECT_INIT(&v, GElf_Rela); ++ int ret = read_relplt(lte, rela_sec, &rela_shdr, &v); ++ if (ret >= 0 ++ && VECT_EACH(&v, GElf_Rela, NULL, ++ reloc_copy_if_irelative, lte) != NULL) ++ ret = -1; ++ ++ VECT_DESTROY(&v, GElf_Rela, NULL, NULL); ++ ++ if (ret < 0) ++ return ret; ++ } + return 0; + } + +@@ -616,6 +717,16 @@ arch_elf_add_plt_entry(struct Process *p + return plt_ok; + } + ++ bool is_irelative = reloc_is_irelative(lte->ehdr.e_machine, rela); ++ char *name; ++ if (is_irelative) ++ name = linux_elf_find_irelative_name(lte, rela); ++ else ++ name = strdup(a_name); ++ ++ if (name == NULL) ++ return plt_fail; ++ + /* PPC64. If we have stubs, we return a chain of breakpoint + * sites, one for each stub that corresponds to this PLT + * entry. */ +@@ -623,7 +734,7 @@ arch_elf_add_plt_entry(struct Process *p + struct library_symbol **symp; + for (symp = <e->arch.stubs; *symp != NULL; ) { + struct library_symbol *sym = *symp; +- if (strcmp(sym->name, a_name) != 0) { ++ if (strcmp(sym->name, name) != 0) { + symp = &(*symp)->next; + continue; + } +@@ -636,6 +747,7 @@ arch_elf_add_plt_entry(struct Process *p + + if (chain != NULL) { + *ret = chain; ++ free(name); + return plt_ok; + } + +@@ -652,12 +764,13 @@ arch_elf_add_plt_entry(struct Process *p + || plt_slot_addr < lte->plt_addr + lte->plt_size); + + GElf_Addr plt_slot_value; +- if (read_plt_slot_value(proc, plt_slot_addr, &plt_slot_value) < 0) ++ if (read_plt_slot_value(proc, plt_slot_addr, &plt_slot_value) < 0) { ++ free(name); + return plt_fail; ++ } + +- char *name = strdup(a_name); + struct library_symbol *libsym = malloc(sizeof(*libsym)); +- if (name == NULL || libsym == NULL) { ++ if (libsym == NULL) { + fprintf(stderr, "allocation for .plt slot: %s\n", + strerror(errno)); + fail: +@@ -669,12 +782,13 @@ arch_elf_add_plt_entry(struct Process *p + /* XXX The double cast should be removed when + * arch_addr_t becomes integral type. 
*/ + if (library_symbol_init(libsym, +- (arch_addr_t)(uintptr_t)plt_entry_addr, ++ (arch_addr_t) (uintptr_t) plt_entry_addr, + name, 1, LS_TOPLT_EXEC) < 0) + goto fail; + libsym->arch.plt_slot_addr = plt_slot_addr; + +- if (plt_slot_value == plt_entry_addr || plt_slot_value == 0) { ++ if (! is_irelative ++ && (plt_slot_value == plt_entry_addr || plt_slot_value == 0)) { + libsym->arch.type = PPC_PLT_UNRESOLVED; + libsym->arch.resolved_value = plt_entry_addr; + +@@ -692,7 +806,13 @@ arch_elf_add_plt_entry(struct Process *p + library_symbol_destroy(libsym); + goto fail; + } +- mark_as_resolved(libsym, plt_slot_value); ++ ++ if (! is_irelative) { ++ mark_as_resolved(libsym, plt_slot_value); ++ } else { ++ libsym->arch.type = PPC_PLT_IRELATIVE; ++ libsym->arch.resolved_value = plt_entry_addr; ++ } + } + + *ret = libsym; +@@ -839,6 +959,15 @@ jump_to_entry_point(struct Process *proc + static void + ppc_plt_bp_continue(struct breakpoint *bp, struct Process *proc) + { ++ /* If this is a first call through IREL breakpoint, enable the ++ * symbol so that it doesn't look like an artificial ++ * breakpoint anymore. */ ++ if (bp->libsym == NULL) { ++ assert(bp->arch.irel_libsym != NULL); ++ bp->libsym = bp->arch.irel_libsym; ++ bp->arch.irel_libsym = NULL; ++ } ++ + switch (bp->libsym->arch.type) { + struct Process *leader; + void (*on_all_stopped)(struct process_stopping_handler *); +@@ -851,6 +980,7 @@ ppc_plt_bp_continue(struct breakpoint *b + assert(bp->libsym->lib->arch.bss_plt_prelinked == 0); + /* Fall through. */ + ++ case PPC_PLT_IRELATIVE: + case PPC_PLT_UNRESOLVED: + on_all_stopped = NULL; + keep_stepping_p = NULL; +@@ -977,6 +1107,8 @@ arch_library_symbol_clone(struct library + int + arch_breakpoint_init(struct Process *proc, struct breakpoint *bp) + { ++ bp->arch.irel_libsym = NULL; ++ + /* Artificial and entry-point breakpoints are plain. */ + if (bp->libsym == NULL || bp->libsym->plt_type != LS_TOPLT_EXEC) + return 0; +@@ -996,6 +1128,14 @@ arch_breakpoint_init(struct Process *pro + .on_retract = ppc_plt_bp_retract, + }; + breakpoint_set_callbacks(bp, &cbs); ++ ++ /* For JMP_IREL breakpoints, make the breakpoint look ++ * artificial by hiding the symbol. */ ++ if (bp->libsym->arch.type == PPC_PLT_IRELATIVE) { ++ bp->arch.irel_libsym = bp->libsym; ++ bp->libsym = NULL; ++ } ++ + return 0; + } + +Index: b/callback.h +=================================================================== +--- a/callback.h ++++ b/callback.h +@@ -47,4 +47,7 @@ enum callback_status { + * and return error. */ + }; + ++#define CBS_STOP_IF(X) ((X) ? CBS_STOP : CBS_CONT) ++#define CBS_CONT_IF(X) ((X) ? CBS_CONT : CBS_STOP) ++ + #endif /* _CALLBACK_H_ */ diff -Nru ltrace-0.7.3/debian/patches/keep_plt_reloc_in_vector_673ff510.patch ltrace-0.7.3/debian/patches/keep_plt_reloc_in_vector_673ff510.patch --- ltrace-0.7.3/debian/patches/keep_plt_reloc_in_vector_673ff510.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/keep_plt_reloc_in_vector_673ff510.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,450 @@ +From 673ff510953b65b844a58478aa434120f457c014 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Fri, 25 Oct 2013 23:45:39 +0200 +Subject: Keep PLT relocations in a vector + +- That means we have to copy them out of ELF ahead of time instead of + referencing them from inside ELF on demand. But this way we can keep + one grand vector of all PLT-like relocations. 
On PPC, this makes + a difference: some PLT-like relocations (R_PPC64_JMP_IREL in + particular) are stored in .rela.dyn, not .rela.plt. +--- + libltrace.c | 8 - + ltrace-elf.c | 195 +++++++++++++++++++++++++------------------ + ltrace-elf.h | 17 +-- + sysdeps/linux-gnu/mips/plt.c | 3 + 4 files changed, 129 insertions(+), 94 deletions(-) + +Index: b/libltrace.c +=================================================================== +--- a/libltrace.c ++++ b/libltrace.c +@@ -1,6 +1,6 @@ + /* + * This file is part of ltrace. +- * Copyright (C) 2011,2012 Petr Machata, Red Hat Inc. ++ * Copyright (C) 2011,2012,2013 Petr Machata, Red Hat Inc. + * Copyright (C) 2009 Juan Cespedes + * + * This program is free software; you can redistribute it and/or +@@ -136,9 +136,9 @@ ltrace_init(int argc, char **argv) { + if (command) { + /* Check that the binary ABI is supported before + * calling execute_program. */ +- struct ltelf lte = {}; +- open_elf(<e, command); +- do_close_elf(<e); ++ struct ltelf lte; ++ ltelf_init(<e, command); ++ ltelf_destroy(<e); + + pid_t pid = execute_program(command, argv); + struct Process *proc = open_program(command, pid); +Index: b/ltrace-elf.c +=================================================================== +--- a/ltrace-elf.c ++++ b/ltrace-elf.c +@@ -242,8 +242,9 @@ DEF_READER(elf_read_u64, 64) + #undef DEF_READER + + int +-open_elf(struct ltelf *lte, const char *filename) ++ltelf_init(struct ltelf *lte, const char *filename) + { ++ memset(lte, 0, sizeof *lte); + lte->fd = open(filename, O_RDONLY); + if (lte->fd == -1) + return 1; +@@ -293,9 +294,20 @@ open_elf(struct ltelf *lte, const char * + exit(EXIT_FAILURE); + } + ++ VECT_INIT(<e->plt_relocs, GElf_Rela); ++ + return 0; + } + ++void ++ltelf_destroy(struct ltelf *lte) ++{ ++ debug(DEBUG_FUNCTION, "close_elf()"); ++ elf_end(lte->elf); ++ close(lte->fd); ++ VECT_DESTROY(<e->plt_relocs, GElf_Rela, NULL, NULL); ++} ++ + static void + read_symbol_table(struct ltelf *lte, const char *filename, + Elf_Scn *scn, GElf_Shdr *shdr, const char *name, +@@ -333,13 +345,86 @@ read_symbol_table(struct ltelf *lte, con + } + + static int +-do_init_elf(struct ltelf *lte, const char *filename) ++rel_to_rela(struct ltelf *lte, const GElf_Rel *rel, GElf_Rela *rela) ++{ ++ rela->r_offset = rel->r_offset; ++ rela->r_info = rel->r_info; ++ ++ Elf_Scn *sec; ++ GElf_Shdr shdr; ++ if (elf_get_section_covering(lte, rel->r_offset, &sec, &shdr) < 0 ++ || sec == NULL) ++ return -1; ++ ++ Elf_Data *data = elf_loaddata(sec, &shdr); ++ if (data == NULL) ++ return -1; ++ ++ GElf_Xword offset = rel->r_offset - shdr.sh_addr - data->d_off; ++ uint64_t value; ++ if (lte->ehdr.e_ident[EI_CLASS] == ELFCLASS32) { ++ uint32_t tmp; ++ if (elf_read_u32(data, offset, &tmp) < 0) ++ return -1; ++ value = tmp; ++ } else if (elf_read_u64(data, offset, &value) < 0) { ++ return -1; ++ } ++ ++ rela->r_addend = value; ++ return 0; ++} ++ ++int ++read_relplt(struct ltelf *lte, Elf_Scn *scn, GElf_Shdr *shdr, ++ struct vect *rela_vec) ++{ ++ if (vect_reserve_additional(rela_vec, lte->ehdr.e_shnum) < 0) ++ return -1; ++ ++ Elf_Data *relplt = elf_loaddata(scn, shdr); ++ if (relplt == NULL) { ++ fprintf(stderr, "Couldn't load .rel*.plt data.\n"); ++ return -1; ++ } ++ ++ if ((shdr->sh_size % shdr->sh_entsize) != 0) { ++ fprintf(stderr, ".rel*.plt size (%" PRIx64 "d) not a multiple " ++ "of its sh_entsize (%" PRIx64 "d).\n", ++ shdr->sh_size, shdr->sh_entsize); ++ return -1; ++ } ++ ++ GElf_Xword relplt_count = shdr->sh_size / shdr->sh_entsize; ++ GElf_Xword i; ++ for (i = 0; i < 
relplt_count; ++i) { ++ GElf_Rela rela; ++ if (relplt->d_type == ELF_T_REL) { ++ GElf_Rel rel; ++ if (gelf_getrel(relplt, i, &rel) == NULL ++ || rel_to_rela(lte, &rel, &rela) < 0) ++ return -1; ++ ++ } else if (gelf_getrela(relplt, i, &rela) == NULL) { ++ return -1; ++ } ++ ++ if (VECT_PUSHBACK(rela_vec, &rela) < 0) ++ return -1; ++ } ++ ++ return 0; ++} ++ ++static int ++ltelf_read_elf(struct ltelf *lte, const char *filename) + { + int i; + GElf_Addr relplt_addr = 0; + GElf_Addr soname_offset = 0; ++ GElf_Xword relplt_size = 0; + +- debug(DEBUG_FUNCTION, "do_init_elf(filename=%s)", filename); ++ debug(DEBUG_FUNCTION, "ltelf_read_elf(filename=%s)", filename); + debug(1, "Reading ELF from %s...", filename); + + for (i = 1; i < lte->ehdr.e_shnum; ++i) { +@@ -398,7 +483,7 @@ do_init_elf(struct ltelf *lte, const cha + if (dyn.d_tag == DT_JMPREL) + relplt_addr = dyn.d_un.d_ptr; + else if (dyn.d_tag == DT_PLTRELSZ) +- lte->relplt_size = dyn.d_un.d_val; ++ relplt_size = dyn.d_un.d_val; + else if (dyn.d_tag == DT_SONAME) + soname_offset = dyn.d_un.d_val; + } +@@ -431,14 +516,9 @@ do_init_elf(struct ltelf *lte, const cha + + if (!relplt_addr || !lte->plt_addr) { + debug(1, "%s has no PLT relocations", filename); +- lte->relplt = NULL; +- lte->relplt_count = 0; +- } else if (lte->relplt_size == 0) { ++ } else if (relplt_size == 0) { + debug(1, "%s has unknown PLT size", filename); +- lte->relplt = NULL; +- lte->relplt_count = 0; + } else { +- + for (i = 1; i < lte->ehdr.e_shnum; ++i) { + Elf_Scn *scn; + GElf_Shdr shdr; +@@ -451,12 +531,9 @@ do_init_elf(struct ltelf *lte, const cha + exit(EXIT_FAILURE); + } + if (shdr.sh_addr == relplt_addr +- && shdr.sh_size == lte->relplt_size) { +- lte->relplt = elf_getdata(scn, NULL); +- lte->relplt_count = +- shdr.sh_size / shdr.sh_entsize; +- if (lte->relplt == NULL +- || elf_getdata(scn, lte->relplt) != NULL) { ++ && shdr.sh_size == relplt_size) { ++ if (read_relplt(lte, scn, &shdr, ++ <e->plt_relocs) < 0) { + fprintf(stderr, "Couldn't get .rel*.plt" + " data from \"%s\": %s\n", + filename, elf_errmsg(-1)); +@@ -472,9 +549,9 @@ do_init_elf(struct ltelf *lte, const cha + filename); + exit(EXIT_FAILURE); + } +- +- debug(1, "%s %zd PLT relocations", filename, lte->relplt_count); + } ++ debug(1, "%s %zd PLT relocations", filename, ++ vect_size(<e->plt_relocs)); + + if (soname_offset != 0) + lte->soname = lte->dynstr + soname_offset; +@@ -482,51 +559,13 @@ do_init_elf(struct ltelf *lte, const cha + return 0; + } + +-void +-do_close_elf(struct ltelf *lte) +-{ +- debug(DEBUG_FUNCTION, "do_close_elf()"); +- arch_elf_destroy(lte); +- elf_end(lte->elf); +- close(lte->fd); +-} +- +-int +-elf_get_sym_info(struct ltelf *lte, const char *filename, +- size_t sym_index, GElf_Rela *rela, GElf_Sym *sym) +-{ +- int i = sym_index; +- GElf_Rel rel; +- void *ret; +- +- if (lte->relplt->d_type == ELF_T_REL) { +- ret = gelf_getrel(lte->relplt, i, &rel); +- rela->r_offset = rel.r_offset; +- rela->r_info = rel.r_info; +- rela->r_addend = 0; +- } else { +- ret = gelf_getrela(lte->relplt, i, rela); +- } +- +- if (ret == NULL +- || ELF64_R_SYM(rela->r_info) >= lte->dynsym_count +- || gelf_getsym(lte->dynsym, ELF64_R_SYM(rela->r_info), +- sym) == NULL) { +- fprintf(stderr, +- "Couldn't get relocation from \"%s\": %s\n", +- filename, elf_errmsg(-1)); +- exit(EXIT_FAILURE); +- } +- +- return 0; +-} +- + #ifndef ARCH_HAVE_GET_SYMINFO + int + arch_get_sym_info(struct ltelf *lte, const char *filename, + size_t sym_index, GElf_Rela *rela, GElf_Sym *sym) + { +- return elf_get_sym_info(lte, 
filename, sym_index, rela, sym); ++ return gelf_getsym(lte->dynsym, ++ ELF64_R_SYM(rela->r_info), sym) != NULL ? 0 : -1; + } + #endif + +@@ -544,12 +583,13 @@ populate_plt(struct Process *proc, const + struct ltelf *lte, struct library *lib, + int latent_plts) + { ++ size_t count = vect_size(<e->plt_relocs); + size_t i; +- for (i = 0; i < lte->relplt_count; ++i) { +- GElf_Rela rela; ++ for (i = 0; i < count; ++i) { ++ GElf_Rela *rela = VECT_ELEMENT(<e->plt_relocs, GElf_Rela, i); + GElf_Sym sym; + +- if (arch_get_sym_info(lte, filename, i, &rela, &sym) < 0) ++ if (arch_get_sym_info(lte, filename, i, rela, &sym)) + continue; /* Skip this entry. */ + + char const *name = lte->dynstr + sym.st_name; +@@ -558,14 +598,14 @@ populate_plt(struct Process *proc, const + name, lib); + struct library_symbol *libsym = NULL; + switch (arch_elf_add_plt_entry(proc, lte, name, +- &rela, i, &libsym)) { ++ rela, i, &libsym)) { + case plt_fail: + return -1; + + case plt_default: + /* Add default entry to the beginning of LIBSYM. */ + if (default_elf_add_plt_entry(proc, lte, name, +- &rela, i, &libsym) < 0) ++ rela, i, &libsym) < 0) + return -1; + case plt_ok: + /* If we didn't match the PLT entry up there, +@@ -826,8 +866,8 @@ static int + read_module(struct library *lib, struct Process *proc, + const char *filename, GElf_Addr bias, int main) + { +- struct ltelf lte = {}; +- if (open_elf(<e, filename) < 0) ++ struct ltelf lte; ++ if (ltelf_init(<e, filename) < 0) + return -1; + + /* XXX When we abstract ABI into a module, this should instead +@@ -835,8 +875,8 @@ read_module(struct library *lib, struct + * + * proc->abi = arch_get_abi(lte.ehdr); + * +- * The code in open_elf needs to be replaced by this logic. +- * Be warned that libltrace.c calls open_elf as well to ++ * The code in ltelf_init needs to be replaced by this logic. ++ * Be warned that libltrace.c calls ltelf_init as well to + * determine whether ABI is supported. This is to get + * reasonable error messages when trying to run 64-bit binary + * with 32-bit ltrace. It is desirable to preserve this. */ +@@ -851,6 +891,8 @@ read_module(struct library *lib, struct + if (process_get_entry(proc, &entry, NULL) < 0) { + fprintf(stderr, "Couldn't find entry of PIE %s\n", + filename); ++ fail: ++ ltelf_destroy(<e); + return -1; + } + /* XXX The double cast should be removed when +@@ -875,19 +917,18 @@ read_module(struct library *lib, struct + fprintf(stderr, + "Couldn't determine base address of %s\n", + filename); +- return -1; ++ goto fail; + } + } + +- if (do_init_elf(<e, filename) < 0) +- return -1; ++ if (ltelf_read_elf(<e, filename) < 0) ++ goto fail; + + if (arch_elf_init(<e, lib) < 0) { + fprintf(stderr, "Backend initialization failed.\n"); +- return -1; ++ goto fail; + } + +- int status = 0; + if (lib == NULL) + goto fail; + +@@ -953,13 +994,9 @@ read_module(struct library *lib, struct + symtabs, exports) < 0) + goto fail; + +-done: +- do_close_elf(<e); +- return status; +- +-fail: +- status = -1; +- goto done; ++ arch_elf_destroy(<e); ++ ltelf_destroy(<e); ++ return 0; + } + + int +Index: b/ltrace-elf.h +=================================================================== +--- a/ltrace-elf.h ++++ b/ltrace-elf.h +@@ -27,6 +27,7 @@ + #include + #include + #include "sysdep.h" ++#include "vect.h" + + struct Process; + struct library; +@@ -48,9 +49,11 @@ struct ltelf { + GElf_Addr plt_addr; + GElf_Word plt_flags; + size_t plt_size; +- Elf_Data *relplt; + Elf_Data *plt_data; +- size_t relplt_count; ++ ++ /* Vector of GElf_Rela with PLT relocations. 
*/ ++ struct vect plt_relocs; ++ + Elf_Data *symtab; + const char *strtab; + const char *soname; +@@ -60,15 +63,14 @@ struct ltelf { + size_t opd_size; + GElf_Addr dyn_addr; + size_t dyn_sz; +- size_t relplt_size; + GElf_Addr bias; + GElf_Addr entry_addr; + GElf_Addr base_addr; + struct arch_ltelf_data arch; + }; + +-int open_elf(struct ltelf *lte, const char *filename); +-void do_close_elf(struct ltelf *lte); ++int ltelf_init(struct ltelf *lte, const char *filename); ++void ltelf_destroy(struct ltelf *lte); + + /* XXX is it possible to put breakpoints in VDSO and VSYSCALL + * pseudo-libraries? For now we assume that all libraries can be +@@ -91,11 +93,6 @@ int default_elf_add_plt_entry(struct Pro + const char *a_name, GElf_Rela *rela, size_t ndx, + struct library_symbol **ret); + +-/* The base implementation of backend.h (arch_get_sym_info). +- * See backend.h for details. */ +-int elf_get_sym_info(struct ltelf *lte, const char *filename, +- size_t sym_index, GElf_Rela *rela, GElf_Sym *sym); +- + Elf_Data *elf_loaddata(Elf_Scn *scn, GElf_Shdr *shdr); + int elf_get_section_covering(struct ltelf *lte, GElf_Addr addr, + Elf_Scn **tgt_sec, GElf_Shdr *tgt_shdr); +Index: b/sysdeps/linux-gnu/mips/plt.c +=================================================================== +--- a/sysdeps/linux-gnu/mips/plt.c ++++ b/sysdeps/linux-gnu/mips/plt.c +@@ -159,7 +159,8 @@ arch_get_sym_info(struct ltelf *lte, con + const char *name; + + if (mips_elf_is_cpic(lte->ehdr.e_flags)) { +- return elf_get_sym_info(lte, filename, sym_index, rela, sym); ++ return gelf_getsym(lte->dynsym, ELF64_R_SYM(rela->r_info), ++ sym) != NULL ? 0 : -1; + } + + /* Fixup the offset. */ diff -Nru ltrace-0.7.3/debian/patches/lp1992939-add-intel-cet-support.patch ltrace-0.7.3/debian/patches/lp1992939-add-intel-cet-support.patch --- ltrace-0.7.3/debian/patches/lp1992939-add-intel-cet-support.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/lp1992939-add-intel-cet-support.patch 2024-02-02 13:02:23.000000000 +0100 @@ -0,0 +1,308 @@ +From ec563704ab5ef48bf308924de4ef021c4d0cd2af Mon Sep 17 00:00:00 2001 +From: DJ Delorie +Date: Sat, 13 Feb 2021 00:30:36 -0500 +Subject: [PATCH] Add Intel CET support + +Adds two features: + +1. Parsing CET-enabled PLTs. +2. Using a second "insecure" PLT as a fallback. + +This is downstream Fedora patch: ltrace-0.7.91-cet.patch +The downstream patch can be removed. + +Signed-off-by: DJ Delorie +========================================= +I enabled ARCH_HAVE_LTELF_DATA for x86 in order to include the code from the patch. It was originally enabled in this commit https://gitlab.com/cespedes/ltrace/-/commit/7b0088dbf6861987f13ad1e0740d633069fdb9d7. +There has been several changes to arch_elf_init (plt.c) since this^ commit. 
+ +Origin: upstream, https://gitlab.com/cespedes/ltrace/-/commit/ec563704ab5ef48bf308924de4ef021c4d0cd2af +Co-Authored-By: Ravi Kant Sharma +Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/ltrace/+bug/1992939 +Last-Update: 2023-12-13 + +--- + debug.h | 1 + + ltrace-elf.c | 16 +++- + ltrace-elf.h | 1 + + sysdeps/linux-gnu/x86/arch.h | 15 +++- + sysdeps/linux-gnu/x86/plt.c | 162 ++++++++++++++++++++++++++++++++++- + 5 files changed, 191 insertions(+), 4 deletions(-) + +diff --git a/debug.h b/debug.h +index 4775d2f..e06fdf3 100644 +--- a/debug.h ++++ b/debug.h +@@ -20,6 +20,7 @@ + + #ifndef _DEBUG_H + #define _DEBUG_H ++#include "backend.h" + + /* debug levels: + */ +diff --git a/ltrace-elf.c b/ltrace-elf.c +index fadd3c1..6eada15 100644 +--- a/ltrace-elf.c ++++ b/ltrace-elf.c +@@ -632,7 +632,21 @@ ltelf_read_elf(struct ltelf *lte, const char *filename) + } + } else if (shdr.sh_type == SHT_PROGBITS + || shdr.sh_type == SHT_NOBITS) { +- if (strcmp(name, ".plt") == 0) { ++ if (strcmp(name, ".plt") == 0 ++ && lte->second_plt_seen == 0) { ++ lte->plt_addr = shdr.sh_addr; ++ lte->plt_size = shdr.sh_size; ++ lte->plt_data = elf_loaddata(scn, &shdr); ++ if (lte->plt_data == NULL) ++ fprintf(stderr, ++ "Can't load .plt data\n"); ++ lte->plt_flags = shdr.sh_flags; ++ } ++ /* An Intel CET binary has two PLTs; the ++ initial PLTGOT points to the second ++ one. */ ++ else if (strcmp(name, ".plt.sec") == 0) { ++ lte->second_plt_seen = 1; + lte->plt_addr = shdr.sh_addr; + lte->plt_size = shdr.sh_size; + lte->plt_data = elf_loaddata(scn, &shdr); +diff --git a/ltrace-elf.h b/ltrace-elf.h +index 42c2e05..2e00793 100644 +--- a/ltrace-elf.h ++++ b/ltrace-elf.h +@@ -47,6 +47,7 @@ struct ltelf { + Elf_Data *dynsym; + size_t dynsym_count; + const char *dynstr; ++ int second_plt_seen; + GElf_Addr plt_addr; + GElf_Word plt_flags; + size_t plt_size; +diff --git a/sysdeps/linux-gnu/x86/arch.h b/sysdeps/linux-gnu/x86/arch.h +index 329cfba..440020e 100644 +--- a/sysdeps/linux-gnu/x86/arch.h ++++ b/sysdeps/linux-gnu/x86/arch.h +@@ -1,6 +1,6 @@ + /* + * This file is part of ltrace. +- * Copyright (C) 2011, 2012 Petr Machata ++ * Copyright (C) 2011,2012,2013 Petr Machata, Red Hat Inc. + * Copyright (C) 2006 Ian Wienand + * Copyright (C) 2004 Juan Cespedes + * +@@ -19,6 +19,10 @@ + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ ++#ifndef LTRACE_X86_ARCH_H ++#define LTRACE_X86_ARCH_H ++ ++#include "vect.h" + + #define BREAKPOINT_VALUE {0xcc} + #define BREAKPOINT_LENGTH 1 +@@ -28,9 +32,18 @@ + #define ARCH_HAVE_ALIGNOF + #define ARCH_ENDIAN_LITTLE + ++#define ARCH_HAVE_ADD_PLT_ENTRY ++ ++#define ARCH_HAVE_LTELF_DATA ++struct arch_ltelf_data { ++ struct vect plt_map; ++}; ++ + #ifdef __x86_64__ + #define LT_ELFCLASS ELFCLASS64 + #define LT_ELF_MACHINE EM_X86_64 + #endif + #define LT_ELFCLASS2 ELFCLASS32 + #define LT_ELF_MACHINE2 EM_386 ++ ++#endif /* LTRACE_X86_ARCH_H */ +diff --git a/sysdeps/linux-gnu/x86/plt.c b/sysdeps/linux-gnu/x86/plt.c +index dc6f183..d885ab8 100644 +--- a/sysdeps/linux-gnu/x86/plt.c ++++ b/sysdeps/linux-gnu/x86/plt.c +@@ -19,16 +19,174 @@ + */ + + #include ++#include ++ + #include "proc.h" + #include "common.h" + #include "library.h" ++#include "trace.h" ++ ++static GElf_Addr ++x86_plt_offset(struct ltelf *lte, uint32_t i) ++{ ++ /* Skip the first PLT entry, which contains a stub to call the ++ * resolver. */ ++ return (i + (lte->second_plt_seen ? 
0 : 1)) * 16; ++} + + GElf_Addr +-arch_plt_sym_val(struct ltelf *lte, size_t ndx, GElf_Rela * rela) { +- return lte->plt_addr + (ndx + 1) * 16; ++arch_plt_sym_val(struct ltelf *lte, size_t ndx, GElf_Rela *rela) ++{ ++ uint32_t i = *VECT_ELEMENT(<e->arch.plt_map, uint32_t, ndx); ++ return x86_plt_offset(lte, i) + lte->plt_addr; + } + + void * + sym2addr(Process *proc, struct library_symbol *sym) { + return sym->enter_addr; + } ++ ++enum plt_status ++arch_elf_add_plt_entry(Process *proc, struct ltelf *lte, ++ const char *a_name, GElf_Rela *rela, size_t ndx, ++ struct library_symbol **ret) ++{ ++ bool irelative = false; ++ if (lte->ehdr.e_machine == EM_X86_64) { ++#ifdef R_X86_64_IRELATIVE ++ irelative = GELF_R_TYPE(rela->r_info) == R_X86_64_IRELATIVE; ++#endif ++ } else { ++ assert(lte->ehdr.e_machine == EM_386); ++#ifdef R_386_IRELATIVE ++ irelative = GELF_R_TYPE(rela->r_info) == R_386_IRELATIVE; ++#endif ++ } ++ ++ if (irelative) ++ return linux_elf_add_plt_entry_irelative(proc, lte, rela, ++ ndx, ret); ++ ++ return plt_default; ++} ++ ++int ++arch_elf_init(struct ltelf *lte, struct library *lib) ++{ ++ VECT_INIT(<e->arch.plt_map, unsigned int); ++ ++ if (vect_reserve(<e->arch.plt_map, vect_size(<e->plt_relocs)) < 0) { ++ arch_elf_destroy(lte); ++ return -1; ++ } ++ ++ { ++ unsigned int i, sz = vect_size(<e->plt_relocs); ++ for (i = 0; i < sz; ++i) ++ vect_pushback (<e->arch.plt_map, &i); ++ } ++ ++ /* IRELATIVE slots may make the whole situation a fair deal ++ * more complex. On x86{,_64}, the PLT slots are not ++ * presented in the order of the corresponding relocations, ++ * but in the order it which these symbols are in the symbol ++ * table. That's static symbol table, which may be stripped ++ * off, not dynsym--that doesn't contain IFUNC symbols at all. ++ * So we have to decode each PLT entry to figure out what ++ * entry it corresponds to. We need to interpret the PLT ++ * table to figure this out. ++ * ++ * On i386, the PLT entry format is as follows: ++ * ++ * 8048300: ff 25 0c a0 04 08 jmp *0x804a00c ++ * 8048306: 68 20 00 00 00 push $0x20 ++ * 804830b: e9 e0 ff ff ff jmp 80482f0 <_init+0x30> ++ * ++ * For PIE binaries it is the following: ++ * ++ * 410: ff a3 10 00 00 00 jmp *0x10(%ebx) ++ * 416: 68 00 00 00 00 push $0x0 ++ * 41b: e9 d0 ff ff ff jmp 3f0 <_init+0x30> ++ * ++ * On x86_64, it is: ++ * ++ * 400420: ff 25 f2 0b 20 00 jmpq *0x200bf2(%rip) # 601018 <_GLOBAL_OFFSET_TABLE_+0x18> ++ * 400426: 68 00 00 00 00 pushq $0x0 ++ * 40042b: e9 e0 ff ff ff jmpq 400410 <_init+0x18> ++ * ++ * For CET binaries it is the following: ++ * ++ * 13d0: f3 0f 1e fa endbr64 ++ * 13d4: 68 27 00 00 00 pushq $0x27 <-- index ++ * 13d9: f2 e9 71 fd ff ff bnd jmpq 1150 <.plt> ++ * 13df: 90 nop ++ * ++ * On i386, the argument to push is an offset of relocation to ++ * use. The first PLT slot has an offset of 0x0, the second ++ * 0x8, etc. On x86_64, it's directly the index that we are ++ * looking for. ++ */ ++ ++ /* Here we scan the PLT table and initialize a map of ++ * relocation->slot number in lte->arch.plt_map. 
*/ ++ ++ unsigned int i, sz = vect_size(<e->plt_relocs); ++ for (i = 0; i < sz; ++i) { ++ ++ GElf_Addr offset = x86_plt_offset(lte, i); ++ uint32_t reloc_arg; ++ ++ uint8_t byte; ++ if (elf_read_next_u8(lte->plt_data, &offset, &byte) < 0) ++ continue; ++ ++ ++ if (byte == 0xf3 ++ && elf_read_next_u8(lte->plt_data, &offset, &byte) >= 0 ++ && byte == 0x0f ++ && elf_read_next_u8(lte->plt_data, &offset, &byte) >= 0 ++ && byte == 0x1e ++ && elf_read_next_u8(lte->plt_data, &offset, &byte) >= 0 ++ && byte == 0xfa ++ && elf_read_next_u8(lte->plt_data, &offset, &byte) >= 0 ++ && byte == 0x68 ++ && elf_read_next_u32(lte->plt_data, ++ &offset, &reloc_arg) >= 0) ++ { ++ /* CET */ ++ fprintf(stderr, "%d: reloc_arg is %lx\n", i, (long)reloc_arg); ++ *VECT_ELEMENT(<e->arch.plt_map, unsigned int, reloc_arg) = i; ++ continue; ++ } ++ ++ if (byte != 0xff ++ || elf_read_next_u8(lte->plt_data, &offset, &byte) < 0 ++ || (byte != 0xa3 && byte != 0x25)) ++ continue; ++ ++ /* Skip immediate argument in the instruction. */ ++ offset += 4; ++ ++ if (elf_read_next_u8(lte->plt_data, &offset, &byte) < 0 ++ || byte != 0x68 ++ || elf_read_next_u32(lte->plt_data, ++ &offset, &reloc_arg) < 0) ++ continue; ++ ++ if (lte->ehdr.e_machine == EM_386) { ++ if (reloc_arg % 8 != 0) ++ continue; ++ reloc_arg /= 8; ++ } ++ ++ *VECT_ELEMENT(<e->arch.plt_map, unsigned int, reloc_arg) = i; ++ } ++ ++ return 0; ++} ++ ++void ++arch_elf_destroy(struct ltelf *lte) ++{ ++ VECT_DESTROY(<e->arch.plt_map, uint32_t, NULL, NULL); ++} +-- +2.40.1 + diff -Nru ltrace-0.7.3/debian/patches/Move-get_hfa_type-from-IA64-backend-to-type.c-name-i.patch ltrace-0.7.3/debian/patches/Move-get_hfa_type-from-IA64-backend-to-type.c-name-i.patch --- ltrace-0.7.3/debian/patches/Move-get_hfa_type-from-IA64-backend-to-type.c-name-i.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/Move-get_hfa_type-from-IA64-backend-to-type.c-name-i.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,163 @@ +Author: Petr Machata +Description: Move get_hfa_type from IA64 backend to type.c, name it type_get_hfa_type +Applied-Upstream: http://anonscm.debian.org/gitweb/?p=collab-maint/ltrace.git;a=commit;h=982cbca34b2b49a158086ff5f43eb9bba89edead +Last-Update: 2014-03-13 + +Index: ltrace/sysdeps/linux-gnu/ia64/fetch.c +=================================================================== +--- ltrace.orig/sysdeps/linux-gnu/ia64/fetch.c 2014-03-12 16:13:44.075726000 -0600 ++++ ltrace/sysdeps/linux-gnu/ia64/fetch.c 2014-03-13 09:32:30.504762084 -0600 +@@ -1,6 +1,6 @@ + /* + * This file is part of ltrace. +- * Copyright (C) 2012 Petr Machata, Red Hat Inc. ++ * Copyright (C) 2012,2013 Petr Machata, Red Hat Inc. 
+ * Copyright (C) 2008,2009 Juan Cespedes + * Copyright (C) 2006 Steve Fink + * Copyright (C) 2006 Ian Wienand +@@ -249,37 +249,6 @@ + return 0; + } + +-static enum arg_type +-get_hfa_type(struct arg_type_info *info, size_t *countp) +-{ +- size_t n = type_aggregate_size(info); +- if (n == (size_t)-1) +- return ARGTYPE_VOID; +- +- enum arg_type type = ARGTYPE_VOID; +- *countp = 0; +- +- while (n-- > 0) { +- struct arg_type_info *emt = type_element(info, n); +- +- enum arg_type emt_type = emt->type; +- size_t emt_count = 1; +- if (emt_type == ARGTYPE_STRUCT || emt_type == ARGTYPE_ARRAY) +- emt_type = get_hfa_type(emt, &emt_count); +- +- if (type == ARGTYPE_VOID) { +- if (emt_type != ARGTYPE_FLOAT +- && emt_type != ARGTYPE_DOUBLE) +- return ARGTYPE_VOID; +- type = emt_type; +- } +- if (emt_type != type) +- return ARGTYPE_VOID; +- *countp += emt_count; +- } +- return type; +-} +- + static int + allocate_hfa(struct fetch_context *ctx, struct Process *proc, + struct arg_type_info *info, struct value *valuep, +@@ -380,10 +349,11 @@ + * floating-point registers, beginning with f8. */ + if (info->type == ARGTYPE_STRUCT || info->type == ARGTYPE_ARRAY) { + size_t hfa_size; +- enum arg_type hfa_type = get_hfa_type(info, &hfa_size); +- if (hfa_type != ARGTYPE_VOID && hfa_size <= 8) ++ struct arg_type_info *hfa_info ++ = type_get_hfa_type(info, &hfa_size); ++ if (hfa_info != NULL && hfa_size <= 8) + return allocate_hfa(ctx, proc, info, valuep, +- hfa_type, hfa_size); ++ hfa_info->type, hfa_size); + } + + /* Integers and pointers are passed in r8. 128-bit integers +@@ -409,7 +379,7 @@ + struct arg_type_info *info, struct value *valuep) + { + switch (info->type) { +- enum arg_type hfa_type; ++ struct arg_type_info *hfa_info; + size_t hfa_size; + + case ARGTYPE_VOID: +@@ -421,10 +391,10 @@ + return allocate_float(ctx, proc, info, valuep, 1); + + case ARGTYPE_STRUCT: +- hfa_type = get_hfa_type(info, &hfa_size); +- if (hfa_type != ARGTYPE_VOID) ++ hfa_info = type_get_hfa_type(info, &hfa_size); ++ if (hfa_info != NULL) + return allocate_hfa(ctx, proc, info, valuep, +- hfa_type, hfa_size); ++ hfa_info->type, hfa_size); + /* Fall through. */ + case ARGTYPE_CHAR: + case ARGTYPE_SHORT: +Index: ltrace/type.c +=================================================================== +--- ltrace.orig/type.c 2014-03-12 16:13:44.075726000 -0600 ++++ ltrace/type.c 2014-03-13 09:32:30.504762084 -0600 +@@ -568,3 +568,39 @@ + } + abort(); + } ++ ++struct arg_type_info * ++type_get_hfa_type(struct arg_type_info *info, size_t *countp) ++{ ++ assert(info != NULL); ++ if (info->type != ARGTYPE_STRUCT ++ && info->type != ARGTYPE_ARRAY) ++ return NULL; ++ ++ size_t n = type_aggregate_size(info); ++ if (n == (size_t)-1) ++ return NULL; ++ ++ struct arg_type_info *ret = NULL; ++ *countp = 0; ++ ++ while (n-- > 0) { ++ struct arg_type_info *emt = type_element(info, n); ++ ++ size_t emt_count = 1; ++ if (emt->type == ARGTYPE_STRUCT || emt->type == ARGTYPE_ARRAY) ++ emt = type_get_hfa_type(emt, &emt_count); ++ if (emt == NULL) ++ return NULL; ++ if (ret == NULL) { ++ if (emt->type != ARGTYPE_FLOAT ++ && emt->type != ARGTYPE_DOUBLE) ++ return NULL; ++ ret = emt; ++ } ++ if (emt->type != ret->type) ++ return NULL; ++ *countp += emt_count; ++ } ++ return ret; ++} +Index: ltrace/type.h +=================================================================== +--- ltrace.orig/type.h 2014-03-12 16:13:44.075726000 -0600 ++++ ltrace/type.h 2014-03-13 09:32:30.504762084 -0600 +@@ -1,6 +1,6 @@ + /* + * This file is part of ltrace. 
+- * Copyright (C) 2011,2012 Petr Machata, Red Hat Inc. ++ * Copyright (C) 2011,2012,2013 Petr Machata, Red Hat Inc. + * Copyright (C) 1997-2009 Juan Cespedes + * + * This program is free software; you can redistribute it and/or +@@ -142,4 +142,13 @@ + * type. */ + struct arg_type_info *type_get_fp_equivalent(struct arg_type_info *info); + ++/* If INFO is homogeneous floating-point aggregate, return the ++ * corresponding floating point type, and set *COUNTP to number of ++ * fields of the structure. Otherwise return NULL. INFO is a HFA if ++ * it's an aggregate whose each field is either a HFA, or a ++ * floating-point type. */ ++struct arg_type_info *type_get_hfa_type(struct arg_type_info *info, ++ size_t *countp); ++ ++ + #endif /* TYPE_H */ diff -Nru ltrace-0.7.3/debian/patches/on_install_breakpoint_56134ff5.patch ltrace-0.7.3/debian/patches/on_install_breakpoint_56134ff5.patch --- ltrace-0.7.3/debian/patches/on_install_breakpoint_56134ff5.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/on_install_breakpoint_56134ff5.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,78 @@ +From 56134ff5442bee4e128b189bb86cfc97dcb6f60a Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Fri, 10 Jan 2014 20:05:15 +0100 +Subject: Add a new per-breakpoint callback on_install + +--- + breakpoint.h | 9 ++++++++- + breakpoints.c | 11 ++++++++++- + 2 files changed, 18 insertions(+), 2 deletions(-) + +Index: b/breakpoint.h +=================================================================== +--- a/breakpoint.h ++++ b/breakpoint.h +@@ -1,6 +1,6 @@ + /* + * This file is part of ltrace. +- * Copyright (C) 2012 Petr Machata, Red Hat Inc. ++ * Copyright (C) 2012,2013,2014 Petr Machata, Red Hat Inc. + * Copyright (C) 2009 Juan Cespedes + * + * This program is free software; you can redistribute it and/or +@@ -48,6 +48,7 @@ struct breakpoint; + struct bp_callbacks { + void (*on_hit)(struct breakpoint *bp, struct Process *proc); + void (*on_continue)(struct breakpoint *bp, struct Process *proc); ++ void (*on_install)(struct breakpoint *bp, struct Process *proc); + void (*on_retract)(struct breakpoint *bp, struct Process *proc); + }; + +@@ -67,6 +68,12 @@ void breakpoint_on_hit(struct breakpoint + * continue_after_breakpoint. */ + void breakpoint_on_continue(struct breakpoint *bp, struct Process *proc); + ++/* Call ON_INSTALL handler of BP, if any is set. This should be ++ * called after the breakpoint is enabled for the first time, not ++ * every time it's enabled (such as after stepping over a site of a ++ * temporarily disabled breakpoint). */ ++void breakpoint_on_install(struct breakpoint *bp, struct Process *proc); ++ + /* Call on-retract handler of BP, if any is set. This should be + * called before the breakpoints are destroyed. The reason for a + * separate interface is that breakpoint_destroy has to be callable +Index: b/breakpoints.c +=================================================================== +--- a/breakpoints.c ++++ b/breakpoints.c +@@ -1,6 +1,6 @@ + /* + * This file is part of ltrace. +- * Copyright (C) 2006,2007,2011,2012 Petr Machata, Red Hat Inc. ++ * Copyright (C) 2006,2007,2011,2012,2013,2014 Petr Machata, Red Hat Inc. 
+ * Copyright (C) 2009 Juan Cespedes + * Copyright (C) 1998,2001,2002,2003,2007,2008,2009 Juan Cespedes + * Copyright (C) 2006 Ian Wienand +@@ -78,6 +78,14 @@ breakpoint_on_continue(struct breakpoint + } + + void ++breakpoint_on_install(struct breakpoint *bp, struct Process *proc) ++{ ++ assert(bp != NULL); ++ if (bp->cbs != NULL && bp->cbs->on_install != NULL) ++ (bp->cbs->on_install)(bp, proc); ++} ++ ++void + breakpoint_on_retract(struct breakpoint *bp, struct Process *proc) + { + assert(bp != NULL); +@@ -181,6 +189,7 @@ breakpoint_turn_on(struct breakpoint *bp + if (bp->enabled == 1) { + assert(proc->pid != 0); + enable_breakpoint(proc, bp); ++ breakpoint_on_install(bp, proc); + } + return 0; + } diff -Nru ltrace-0.7.3/debian/patches/ppc64el.diff ltrace-0.7.3/debian/patches/ppc64el.diff --- ltrace-0.7.3/debian/patches/ppc64el.diff 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/ppc64el.diff 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,705 @@ +From eea4ad2cce289753aaa35b4e0258a76d8f8f367c Mon Sep 17 00:00:00 2001 +From: Thierry Fauck +Date: Tue, 13 May 2014 07:48:24 -0400 +Subject: [PATCH] Support for powerpc64 arch ppc64el + +Signed-off-by: Thierry Fauck + + Add support for ppc64le proc and ELF ABIv2. + Provides support for irelative and wchar +--- + configure.ac | 4 + ltrace-elf.h | 1 + sysdeps/linux-gnu/ppc/arch.h | 35 +++++- + sysdeps/linux-gnu/ppc/fetch.c | 244 ++++++++++++++++++++++++++++++++++++++---- + sysdeps/linux-gnu/ppc/plt.c | 51 +++++++- + sysdeps/linux-gnu/ppc/trace.c | 10 + + 6 files changed, 309 insertions(+), 36 deletions(-) + +Index: b/ltrace-elf.h +=================================================================== +--- a/ltrace-elf.h ++++ b/ltrace-elf.h +@@ -136,6 +136,7 @@ int elf_read_next_uleb128(Elf_Data *data + /* Return whether there's AMOUNT more bytes after OFFSET in DATA. */ + int elf_can_read_next(Elf_Data *data, GElf_Xword offset, GElf_Xword amount); + ++void delete_symbol_chain(struct library_symbol *); + #if __WORDSIZE == 32 + #define PRI_ELF_ADDR PRIx32 + #define GELF_ADDR_CAST(x) (void *)(uint32_t)(x) +Index: b/sysdeps/linux-gnu/ppc/arch.h +=================================================================== +--- a/sysdeps/linux-gnu/ppc/arch.h ++++ b/sysdeps/linux-gnu/ppc/arch.h +@@ -23,8 +23,8 @@ + #define LTRACE_PPC_ARCH_H + + #include ++#include + +-#define BREAKPOINT_VALUE { 0x7f, 0xe0, 0x00, 0x08 } + #define BREAKPOINT_LENGTH 4 + #define DECR_PC_AFTER_BREAK 0 + +@@ -34,15 +34,39 @@ + #ifdef __powerpc64__ // Says 'ltrace' is 64 bits, says nothing about target. 
+ #define LT_ELFCLASS2 ELFCLASS64 + #define LT_ELF_MACHINE2 EM_PPC64 +-#define ARCH_SUPPORTS_OPD +-#endif ++ ++# ifdef __LITTLE_ENDIAN__ ++# define BREAKPOINT_VALUE { 0x08, 0x00, 0xe0, 0x7f } ++# define ARCH_ENDIAN_LITTLE ++# else ++# define BREAKPOINT_VALUE { 0x7f, 0xe0, 0x00, 0x08 } ++# define ARCH_SUPPORTS_OPD ++# define ARCH_ENDIAN_BIG ++# endif ++ ++# if _CALL_ELF != 2 ++# define ARCH_SUPPORTS_OPD ++# define STACK_FRAME_OVERHEAD 112 ++# ifndef EF_PPC64_ABI ++# define EF_PPC64_ABI 3 ++# endif ++# else /* _CALL_ELF == 2 ABIv2 */ ++# define STACK_FRAME_OVERHEAD 32 ++# endif /* CALL_ELF */ ++ ++#else ++#define BREAKPOINT_VALUE { 0x7f, 0xe0, 0x00, 0x08 } ++#define ARCH_ENDIAN_BIG ++# ifndef EF_PPC64_ABI ++# define EF_PPC64_ABI 3 ++# endif ++#endif /* __powerpc64__ */ + + #define ARCH_HAVE_ATOMIC_SINGLESTEP + #define ARCH_HAVE_ADD_PLT_ENTRY + #define ARCH_HAVE_TRANSLATE_ADDRESS + #define ARCH_HAVE_DYNLINK_DONE + #define ARCH_HAVE_FETCH_ARG +-#define ARCH_ENDIAN_BIG + #define ARCH_HAVE_SIZEOF + #define ARCH_HAVE_ALIGNOF + +@@ -55,7 +79,8 @@ struct arch_ltelf_data { + Elf_Data *opd_data; + GElf_Addr opd_base; + GElf_Xword opd_size; +- int secure_plt; ++ bool secure_plt : 1; ++ bool elfv2_abi : 1; + + Elf_Data *reladyn; + size_t reladyn_count; +Index: b/sysdeps/linux-gnu/ppc/fetch.c +=================================================================== +--- a/sysdeps/linux-gnu/ppc/fetch.c ++++ b/sysdeps/linux-gnu/ppc/fetch.c +@@ -30,9 +30,11 @@ + #include "ptrace.h" + #include "proc.h" + #include "value.h" ++#include "ltrace-elf.h" + + static int allocate_gpr(struct fetch_context *ctx, struct Process *proc, +- struct arg_type_info *info, struct value *valuep); ++ struct arg_type_info *info, struct value *valuep, ++ size_t off, bool is_hfa_type); + + /* Floating point registers have the same width on 32-bit as well as + * 64-bit PPC, but presents a different API depending on +@@ -62,7 +64,10 @@ struct fetch_context { + gregs64_t r64; + } regs; + struct fpregs_t fpregs; +- ++ int vgreg; ++ int struct_size; ++ int struct_hfa_size; ++ int struct_hfa_count; + }; + + static int +@@ -74,7 +79,8 @@ fetch_context_init(struct Process *proc, + if (proc->e_machine == EM_PPC) + context->stack_pointer = proc->stack_pointer + 8; + else +- context->stack_pointer = proc->stack_pointer + 112; ++ context->stack_pointer = proc->stack_pointer ++ + STACK_FRAME_OVERHEAD; + + /* When ltrace is 64-bit, we might use PTRACE_GETREGS to + * obtain 64-bit as well as 32-bit registers. But if we do it +@@ -118,6 +124,11 @@ arch_fetch_arg_init(enum tof type, struc + return NULL; + } + ++ context->vgreg = context->greg; ++ context->struct_size = 0; ++ context->struct_hfa_size = 0; ++ context->struct_hfa_count = 0; ++ + /* Aggregates or unions of any length, and character strings + * of length longer than 8 bytes, will be returned in a + * storage buffer allocated by the caller. The caller will +@@ -125,8 +136,20 @@ arch_fetch_arg_init(enum tof type, struc + * in r3, causing the first explicit argument to be passed in + * r4. */ + context->ret_struct = ret_info->type == ARGTYPE_STRUCT; +- if (context->ret_struct) ++ if (context->ret_struct) { ++#if _CALL_ELF == 2 ++ /* if R3 points to stack, parameters will be in R4. 
*/ ++ uint64_t pstack_end = ptrace(PTRACE_PEEKTEXT, proc->pid, ++ proc->stack_pointer, 0); ++ if (((arch_addr_t)context->regs.r64[3] > proc->stack_pointer) ++ && (context->regs.r64[3] < pstack_end)) { ++ context->greg++; ++ context->stack_pointer += 8; ++ } ++#else + context->greg++; ++#endif ++ } + + return context; + } +@@ -144,7 +167,8 @@ arch_fetch_arg_clone(struct Process *pro + + static int + allocate_stack_slot(struct fetch_context *ctx, struct Process *proc, +- struct arg_type_info *info, struct value *valuep) ++ struct arg_type_info *info, struct value *valuep, ++ bool is_hfa_type) + { + size_t sz = type_sizeof(proc, info); + if (sz == (size_t)-1) +@@ -154,7 +178,14 @@ allocate_stack_slot(struct fetch_context + size_t off = 0; + if (proc->e_machine == EM_PPC && a < 4) + a = 4; ++#if _CALL_ELF == 2 ++ else if (proc->e_machine == EM_PPC64 && sz == 4 && is_hfa_type) ++ a = 4; ++ else ++ a = 8; ++#else + else if (proc->e_machine == EM_PPC64 && a < 8) ++#endif + a = 8; + + /* XXX Remove the two double casts when arch_addr_t +@@ -164,7 +195,7 @@ allocate_stack_slot(struct fetch_context + + if (valuep != NULL) + value_in_inferior(valuep, ctx->stack_pointer + off); +- ctx->stack_pointer += sz; ++ ctx->stack_pointer += a; + + return 0; + } +@@ -216,19 +247,34 @@ align_small_int(unsigned char *buf, size + + static int + allocate_gpr(struct fetch_context *ctx, struct Process *proc, +- struct arg_type_info *info, struct value *valuep) ++ struct arg_type_info *info, struct value *valuep, ++ size_t off, bool is_hfa_type) + { + if (ctx->greg > 10) +- return allocate_stack_slot(ctx, proc, info, valuep); ++ return allocate_stack_slot(ctx, proc, info, valuep, is_hfa_type); + +- int reg_num = ctx->greg++; +- if (valuep == NULL) +- return 0; ++ int reg_num = ctx->greg; + + size_t sz = type_sizeof(proc, info); + if (sz == (size_t)-1) + return -1; + assert(sz == 1 || sz == 2 || sz == 4 || sz == 8); ++#if _CALL_ELF == 2 ++ /* Consume the stack slot corresponding to this arg. */ ++ if ((sz + off) >= 8) ++ ctx->greg++; ++ ++ if (is_hfa_type) ++ ctx->stack_pointer += sz; ++ else ++ ctx->stack_pointer += 8; ++#else ++ ctx->greg++; ++#endif ++ ++ if (valuep == NULL) ++ return 0; ++ + if (value_reserve(valuep, sz) == NULL) + return -1; + +@@ -240,13 +286,14 @@ allocate_gpr(struct fetch_context *ctx, + u.i64 = read_gpr(ctx, proc, reg_num); + if (proc->e_machine == EM_PPC) + align_small_int(u.buf, 8, sz); +- memcpy(value_get_raw_data(valuep), u.buf, sz); ++ memcpy(value_get_raw_data(valuep), u.buf + off, sz); + return 0; + } + + static int + allocate_float(struct fetch_context *ctx, struct Process *proc, +- struct arg_type_info *info, struct value *valuep) ++ struct arg_type_info *info, struct value *valuep, ++ size_t off, bool is_hfa_type) + { + int pool = proc->e_machine == EM_PPC64 ? 
13 : 8; + if (ctx->freg <= pool) { +@@ -257,8 +304,12 @@ allocate_float(struct fetch_context *ctx + } u = { .d = ctx->fpregs.fpregs[ctx->freg] }; + + ctx->freg++; ++ ++ if (!is_hfa_type) ++ ctx->vgreg++; ++ + if (proc->e_machine == EM_PPC64) +- allocate_gpr(ctx, proc, info, NULL); ++ allocate_gpr(ctx, proc, info, NULL, off, is_hfa_type); + + size_t sz = sizeof(double); + if (info->type == ARGTYPE_FLOAT) { +@@ -272,9 +323,129 @@ allocate_float(struct fetch_context *ctx + memcpy(value_get_raw_data(valuep), u.buf, sz); + return 0; + } +- return allocate_stack_slot(ctx, proc, info, valuep); ++ return allocate_stack_slot(ctx, proc, info, valuep, is_hfa_type); + } + ++#if _CALL_ELF == 2 ++static int ++allocate_hfa(struct fetch_context *ctx, struct Process *proc, ++ struct arg_type_info *info, struct value *valuep, ++ enum arg_type hfa_type, size_t hfa_count) ++{ ++ size_t sz = type_sizeof(proc, info); ++ if (sz == (size_t)-1) ++ return -1; ++ ++ ctx->struct_hfa_size += sz; ++ ++ /* There are two changes regarding structure return types: ++ * * heterogeneous float/vector structs are returned ++ * in (multiple) FP/vector registers, ++ * instead of via implicit reference. ++ * * small structs (up to 16 bytes) are return ++ * in one or two GPRs, instead of via implicit reference. ++ * ++ * Other structures (larger than 16 bytes, not heterogeneous) ++ * are still returned via implicit reference (i.e. a pointer ++ * to memory where to return the struct being passed in r3). ++ * Of course, whether or not an implicit reference pointer ++ * is present will shift the remaining arguments, ++ * so you need to get this right for ELFv2 in order ++ * to get the arguments correct. ++ * If an actual parameter is known to correspond to an HFA ++ * formal parameter, each element is passed in the next ++ * available floating-point argument register starting at fp1 ++ * until the fp13. The remaining elements of the aggregate are ++ * passed on the stack. */ ++ size_t slot_off = 0; ++ ++ unsigned char *buf = value_reserve(valuep, sz); ++ if (buf == NULL) ++ return -1; ++ ++ struct arg_type_info *hfa_info = type_get_simple(hfa_type); ++ size_t hfa_sz = type_sizeof(proc, hfa_info); ++ ++ if (hfa_count > 8) ++ ctx->struct_hfa_count += hfa_count; ++ ++ while (hfa_count > 0 && ctx->freg <= 13) { ++ int rc; ++ struct value tmp; ++ ++ value_init(&tmp, proc, NULL, hfa_info, 0); ++ ++ /* Hetereogeneous struct - get value on GPR or stack. */ ++ if (((hfa_type == ARGTYPE_FLOAT ++ || hfa_type == ARGTYPE_DOUBLE) ++ && hfa_count <= 8)) ++ rc = allocate_float(ctx, proc, hfa_info, &tmp, ++ slot_off, true); ++ else ++ rc = allocate_gpr(ctx, proc, hfa_info, &tmp, ++ slot_off, true); ++ ++ memcpy(buf, value_get_data(&tmp, NULL), hfa_sz); ++ ++ slot_off += hfa_sz; ++ buf += hfa_sz; ++ hfa_count--; ++ if (slot_off == 8) { ++ slot_off = 0; ++ ctx->vgreg++; ++ } ++ ++ value_destroy(&tmp); ++ if (rc < 0) ++ return -1; ++ } ++ if (hfa_count == 0) ++ return 0; ++ ++ /* if no remaining FP, GPR corresponding to slot is used ++ * Mostly it is in part of r10. 
*/ ++ if (ctx->struct_hfa_size <= 64 && ctx->vgreg == 10) { ++ while (ctx->vgreg <= 10) { ++ struct value tmp; ++ value_init(&tmp, proc, NULL, hfa_info, 0); ++ union { ++ uint64_t i64; ++ unsigned char buf[0]; ++ } u; ++ ++ u.i64 = read_gpr(ctx, proc, ctx->vgreg); ++ ++ memcpy(buf, u.buf + slot_off, hfa_sz); ++ slot_off += hfa_sz; ++ buf += hfa_sz; ++ hfa_count--; ++ ctx->stack_pointer += hfa_sz; ++ if (slot_off >= 8 ) { ++ slot_off = 0; ++ ctx->vgreg++; ++ } ++ value_destroy(&tmp); ++ } ++ } ++ ++ if (hfa_count == 0) ++ return 0; ++ ++ /* Remaining values are on stack */ ++ while (hfa_count) { ++ struct value tmp; ++ value_init(&tmp, proc, NULL, hfa_info, 0); ++ ++ value_in_inferior(&tmp, ctx->stack_pointer); ++ memcpy(buf, value_get_data(&tmp, NULL), hfa_sz); ++ ctx->stack_pointer += hfa_sz; ++ buf += hfa_sz; ++ hfa_count--; ++ } ++ return 0; ++} ++#endif ++ + static int + allocate_argument(struct fetch_context *ctx, struct Process *proc, + struct arg_type_info *info, struct value *valuep) +@@ -287,13 +458,25 @@ allocate_argument(struct fetch_context * + + case ARGTYPE_FLOAT: + case ARGTYPE_DOUBLE: +- return allocate_float(ctx, proc, info, valuep); ++ return allocate_float(ctx, proc, info, valuep, ++ 8 - type_sizeof(proc,info), false); + + case ARGTYPE_STRUCT: + if (proc->e_machine == EM_PPC) { + if (value_pass_by_reference(valuep) < 0) + return -1; + } else { ++#if _CALL_ELF == 2 ++ struct arg_type_info *hfa_info; ++ size_t hfa_size; ++ hfa_info = type_get_hfa_type(info, &hfa_size); ++ if (hfa_info != NULL ) { ++ size_t sz = type_sizeof(proc, info); ++ ctx->struct_size += sz; ++ return allocate_hfa(ctx, proc, info, valuep, ++ hfa_info->type, hfa_size); ++ } ++#endif + /* PPC64: Fixed size aggregates and unions passed by + * value are mapped to as many doublewords of the + * parameter save area as the value uses in memory. +@@ -326,6 +509,10 @@ allocate_argument(struct fetch_context * + size_t sz = type_sizeof(proc, valuep->type); + if (sz == (size_t)-1) + return -1; ++ ++ if (ctx->ret_struct) ++ ctx->struct_size += sz; ++ + size_t slots = (sz + width - 1) / width; /* Round up. */ + unsigned char *buf = value_reserve(valuep, slots * width); + if (buf == NULL) +@@ -346,9 +533,11 @@ allocate_argument(struct fetch_context * + struct arg_type_info *fp_info + = type_get_fp_equivalent(valuep->type); + if (fp_info != NULL) +- rc = allocate_float(ctx, proc, fp_info, &val); ++ rc = allocate_float(ctx, proc, fp_info, &val, ++ 8-type_sizeof(proc,info), false); + else +- rc = allocate_gpr(ctx, proc, long_info, &val); ++ rc = allocate_gpr(ctx, proc, long_info, &val, ++ 0, false); + + if (rc >= 0) { + memcpy(ptr, value_get_data(&val, NULL), width); +@@ -363,6 +552,7 @@ allocate_argument(struct fetch_context * + return rc; + } + ++#ifndef __LITTLE_ENDIAN__ + /* Small values need post-processing. 
*/ + if (sz < width) { + switch (info->type) { +@@ -394,6 +584,7 @@ allocate_argument(struct fetch_context * + break; + } + } ++#endif + + return 0; + } +@@ -411,7 +602,22 @@ arch_fetch_retval(struct fetch_context * + struct Process *proc, struct arg_type_info *info, + struct value *valuep) + { ++ if (fetch_context_init(proc, ctx) < 0) ++ return -1; ++ ++#if _CALL_ELF == 2 ++ void *ptr = (void *)(ctx->regs.r64[1]+32); ++ uint64_t val = ptrace(PTRACE_PEEKTEXT, proc->pid, ptr, 0); ++ ++ if (ctx->ret_struct ++ && ((ctx->struct_size > 64 ++ || ctx->struct_hfa_count > 8 ++ || (ctx->struct_hfa_size == 0 && ctx->struct_size > 56) ++ || (ctx->regs.r64[3] == ctx->regs.r64[1]+32) ++ || (ctx->regs.r64[3] == val )))) { ++#else + if (ctx->ret_struct) { ++#endif + assert(info->type == ARGTYPE_STRUCT); + + uint64_t addr = read_gpr(ctx, proc, 3); +@@ -424,8 +630,6 @@ arch_fetch_retval(struct fetch_context * + return 0; + } + +- if (fetch_context_init(proc, ctx) < 0) +- return -1; + return allocate_argument(ctx, proc, info, valuep); + } + +Index: b/sysdeps/linux-gnu/ppc/plt.c +=================================================================== +--- a/sysdeps/linux-gnu/ppc/plt.c ++++ b/sysdeps/linux-gnu/ppc/plt.c +@@ -131,7 +131,11 @@ + */ + + #define PPC_PLT_STUB_SIZE 16 +-#define PPC64_PLT_STUB_SIZE 8 //xxx ++#if _CALL_ELF != 2 ++#define PPC64_PLT_STUB_SIZE 8 ++#else ++#define PPC64_PLT_STUB_SIZE 4 ++#endif + + static inline int + host_powerpc64() +@@ -226,8 +230,13 @@ ppc32_delayed_symbol(struct library_symb + if ((insn1 & BRANCH_MASK) == B_INSN + || ((insn2 & BRANCH_MASK) == B_INSN + /* XXX double cast */ +- && (ppc_branch_dest(libsym->enter_addr + 4, insn2) +- == (arch_addr_t) (long) libsym->lib->arch.pltgot_addr))) ++#ifdef __LITTLE_ENDIAN__ ++ && (ppc_branch_dest(libsym->enter_addr + 4, insn1) ++ == (arch_addr_t) (long) libsym->lib->arch.pltgot_addr))) ++#else ++ && (ppc_branch_dest(libsym->enter_addr + 4, insn2) ++ == (arch_addr_t) (long) libsym->lib->arch.pltgot_addr))) ++#endif + { + mark_as_resolved(libsym, libsym->arch.resolved_value); + } +@@ -246,7 +255,7 @@ arch_dynlink_done(struct Process *proc) + "couldn't read PLT value for %s(%p): %s\n", + libsym->name, libsym->enter_addr, + strerror(errno)); +- return; ++ return; + } + + if (proc->e_machine == EM_PPC) +@@ -326,6 +335,7 @@ arch_translate_address_dyn(struct Proces + arch_addr_t addr, arch_addr_t *ret) + { + if (proc->e_machine == EM_PPC64) { ++#if _CALL_ELF != 2 + uint64_t value; + if (read_target_8(proc, addr, &value) < 0) { + fprintf(stderr, +@@ -337,6 +347,7 @@ arch_translate_address_dyn(struct Proces + * arch_addr_t becomes integral type. */ + *ret = (arch_addr_t)(uintptr_t)value; + return 0; ++#endif + } + + *ret = addr; +@@ -347,7 +358,8 @@ int + arch_translate_address(struct ltelf *lte, + arch_addr_t addr, arch_addr_t *ret) + { +- if (lte->ehdr.e_machine == EM_PPC64) { ++ if (lte->ehdr.e_machine == EM_PPC64 ++ && !lte->arch.elfv2_abi) { + /* XXX The double cast should be removed when + * arch_addr_t becomes integral type. */ + GElf_Xword offset +@@ -501,7 +513,16 @@ reloc_copy_if_irelative(GElf_Rela *rela, + int + arch_elf_init(struct ltelf *lte, struct library *lib) + { ++ ++ /* Check for ABIv2 in ELF header processor specific flag. */ ++#ifndef EF_PPC64_ABI ++ assert (! 
(lte->ehdr.e_flags & 3 ) == 2) ++#else ++ lte->arch.elfv2_abi=((lte->ehdr.e_flags & EF_PPC64_ABI) == 2) ; ++#endif ++ + if (lte->ehdr.e_machine == EM_PPC64 ++ && !lte->arch.elfv2_abi + && load_opd_data(lte, lib) < 0) + return -1; + +@@ -670,7 +691,7 @@ read_plt_slot_value(struct Process *proc + uint64_t l; + /* XXX double cast. */ + if (read_target_8(proc, (arch_addr_t)(uintptr_t)addr, &l) < 0) { +- fprintf(stderr, "ptrace .plt slot value @%#" PRIx64": %s\n", ++ debug(DEBUG_EVENT, "ptrace .plt slot value @%#" PRIx64": %s", + addr, strerror(errno)); + return -1; + } +@@ -687,7 +708,7 @@ unresolve_plt_slot(struct Process *proc, + * pointers intact. Hence the only adjustment that we need to + * do is to IP. */ + if (ptrace(PTRACE_POKETEXT, proc->pid, addr, value) < 0) { +- fprintf(stderr, "failed to unresolve .plt slot: %s\n", ++ debug(DEBUG_EVENT, "failed to unresolve .plt slot: %s", + strerror(errno)); + return -1; + } +@@ -763,10 +784,14 @@ arch_elf_add_plt_entry(struct Process *p + assert(plt_slot_addr >= lte->plt_addr + || plt_slot_addr < lte->plt_addr + lte->plt_size); + ++ /* Should avoid to do read if dynamic linker hasn't run yet ++ * or allow -1 a valid return code. */ + GElf_Addr plt_slot_value; + if (read_plt_slot_value(proc, plt_slot_addr, &plt_slot_value) < 0) { +- free(name); +- return plt_fail; ++ if (!lte->arch.elfv2_abi) ++ goto fail; ++ else ++ return PPC_PLT_UNRESOLVED; + } + + struct library_symbol *libsym = malloc(sizeof(*libsym)); +@@ -1007,8 +1032,12 @@ ppc_plt_bp_continue(struct breakpoint *b + return; + } + ++#if _CALL_ELF == 2 ++ continue_after_breakpoint(proc, bp); ++#else + jump_to_entry_point(proc, bp); + continue_process(proc->pid); ++#endif + return; + + case PPC64_PLT_STUB: +@@ -1084,7 +1113,11 @@ arch_library_symbol_init(struct library_ + /* We set type explicitly in the code above, where we have the + * necessary context. This is for calls from ltrace-elf.c and + * such. 
*/ ++#if _CALL_ELF == 2 ++ libsym->arch.type = PPC_PLT_UNRESOLVED; ++#else + libsym->arch.type = PPC_DEFAULT; ++#endif + return 0; + } + +Index: b/sysdeps/linux-gnu/ppc/trace.c +=================================================================== +--- a/sysdeps/linux-gnu/ppc/trace.c ++++ b/sysdeps/linux-gnu/ppc/trace.c +@@ -63,9 +63,15 @@ syscall_p(Process *proc, int status, int + if (WIFSTOPPED(status) + && WSTOPSIG(status) == (SIGTRAP | proc->tracesysgood)) { + long pc = (long)get_instruction_pointer(proc); ++#ifndef __LITTLE_ENDIAN__ + int insn = + (int)ptrace(PTRACE_PEEKTEXT, proc->pid, pc - sizeof(long), + 0); ++#else ++ int insn = ++ (int)ptrace(PTRACE_PEEKTEXT, proc->pid, pc - sizeof(int), ++ 0); ++#endif + + if (insn == SYSCALL_INSN) { + *sysnum = +@@ -128,7 +134,11 @@ arch_atomic_singlestep(struct Process *p + return -1; + uint32_t insn; + #ifdef __powerpc64__ ++# ifdef __LITTLE_ENDIAN__ ++ insn = (uint32_t) l; ++# else + insn = l >> 32; ++# endif + #else + insn = l; + #endif +Index: b/configure.ac +=================================================================== +--- a/configure.ac ++++ b/configure.ac +@@ -42,7 +42,7 @@ case "${host_cpu}" in + arm*|sa110) HOST_CPU="arm" ;; + cris*) HOST_CPU="cris" ;; + mips*) HOST_CPU="mips" ;; +- powerpc|powerpc64) HOST_CPU="ppc" ;; ++ powerpc|powerpc64|powerpc64le) HOST_CPU="ppc" ;; + sun4u|sparc64) HOST_CPU="sparc" ;; + s390x) HOST_CPU="s390" ;; + i?86|x86_64) HOST_CPU="x86" ;; +@@ -167,7 +167,7 @@ if test x"$enable_libunwind" = xyes; the + arm*|sa110) UNWIND_ARCH="arm" ;; + i?86) UNWIND_ARCH="x86" ;; + powerpc) UNWIND_ARCH="ppc32" ;; +- powerpc64) UNWIND_ARCH="ppc64" ;; ++ powerpc64|powerpc64le) UNWIND_ARCH="ppc64" ;; + mips*) UNWIND_ARCH="mips" ;; + *) UNWIND_ARCH="${host_cpu}" ;; + esac diff -Nru ltrace-0.7.3/debian/patches/ppc64el-fix-ftbfs.diff ltrace-0.7.3/debian/patches/ppc64el-fix-ftbfs.diff --- ltrace-0.7.3/debian/patches/ppc64el-fix-ftbfs.diff 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/ppc64el-fix-ftbfs.diff 2024-02-02 13:01:56.000000000 +0100 @@ -0,0 +1,48 @@ +Description: Fix backport of support for upstream patch to support ppc64el + While backporting the following patch from upstream to the 0.7.3 version: + + commit 7f20cb21a0d021ab3b12d8a5187beb728f339b0f + Author: Thierry Fauck + AuthorDate: Tue May 13 07:48:24 2014 -0400 + Commit: Petr Machata + CommitDate: Wed Nov 11 19:08:43 2015 +0100 + + Support for powerpc64 arch ppc64el + + Signed-off-by: Thierry Fauck + + Add support for ppc64le proc and ELF ABIv2. + Provides support for irelative and wchar + + We kept the code jumping to the "fail" label from the "arch_elf_add_plt_entry" + function. Unfortunately, the "fail" label does not appear before the same + statement in the original version and in the 0.7.3 version. + In 0.7.3, the statements following the "fail" label assume the existence of + the libsym variable. Therefore, while jumping to this label too soon, we go + over the declaration and assignment of the libsym variable. + + This is nowadays detected by GCC as a use of uninitialized variable. + +Author: Olivier Gayot +Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/ltrace/+bug/1967518 +Forwarded: not-needed +Last-Update: 2022-04-07 +--- +This patch header follows DEP-3: http://dep.debian.net/deps/dep3/ +Index: b/sysdeps/linux-gnu/ppc/plt.c +=================================================================== +--- a/sysdeps/linux-gnu/ppc/plt.c ++++ b/sysdeps/linux-gnu/ppc/plt.c +@@ -769,8 +769,10 @@ + * or allow -1 a valid return code. 
*/ + GElf_Addr plt_slot_value; + int rc = read_plt_slot_value(proc, plt_slot_addr, &plt_slot_value); +- if (rc < 0 && !lte->arch.elfv2_abi) +- goto fail; ++ if (rc < 0 && !lte->arch.elfv2_abi) { ++ free(name); ++ return plt_fail; ++ } + + struct library_symbol *libsym = malloc(sizeof(*libsym)); + if (libsym == NULL) { diff -Nru ltrace-0.7.3/debian/patches/ppc64-fork.patch ltrace-0.7.3/debian/patches/ppc64-fork.patch --- ltrace-0.7.3/debian/patches/ppc64-fork.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/ppc64-fork.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,49 @@ +From 35742523e3daa0e59de0c1c3fdd8e5ff52891967 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Thu, 9 Jan 2014 23:41:50 +0100 +Subject: [PATCH] Fix a problem in tracing across fork on PPC64 + +In order to avoid single-stepping through large portions of the +dynamic linker, ltrace remembers at which address the instruction that +resolved a PLT slot is. It then puts a breakpoint to this address so +that it can fast-forward to that address next time it needs to catch a +PLT slot being resolved. + +When a process is cloned, the pointer to this breakpoint is simply +copied over to the new process, instead of being looked up in the new +process structures. This patches fixes this. +--- + sysdeps/linux-gnu/ppc/plt.c | 14 +++++++++++++- + 1 file changed, 13 insertions(+), 1 deletion(-) + +Index: b/sysdeps/linux-gnu/ppc/plt.c +=================================================================== +--- a/sysdeps/linux-gnu/ppc/plt.c ++++ b/sysdeps/linux-gnu/ppc/plt.c +@@ -1,6 +1,6 @@ + /* + * This file is part of ltrace. +- * Copyright (C) 2012 Petr Machata, Red Hat Inc. ++ * Copyright (C) 2012,2013,2014 Petr Machata, Red Hat Inc. + * Copyright (C) 2004,2008,2009 Juan Cespedes + * Copyright (C) 2006 Paul Gilliam + * +@@ -1201,6 +1201,18 @@ int + arch_process_clone(struct Process *retp, struct Process *proc) + { + retp->arch = proc->arch; ++ ++ if (retp->arch.dl_plt_update_bp != NULL) { ++ /* Point it to the corresponding breakpoint in RETP. ++ * It must be there, this part of PROC has already ++ * been cloned to RETP. */ ++ retp->arch.dl_plt_update_bp ++ = address2bpstruct(retp, ++ retp->arch.dl_plt_update_bp->addr); ++ ++ assert(retp->arch.dl_plt_update_bp != NULL); ++ } ++ + return 0; + } + diff -Nru ltrace-0.7.3/debian/patches/ppc64le-fixes.patch ltrace-0.7.3/debian/patches/ppc64le-fixes.patch --- ltrace-0.7.3/debian/patches/ppc64le-fixes.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/ppc64le-fixes.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,461 @@ +From: Petr Machata +Subject: Miscellaneous ppc64el fixes +Last-Update: 2016-04-06 + +This is a backport of the following upstream commits: + - [35a9677d] fix bugs in fetch backend of powerpc64le + - [a46c07fc] Fix coding style in PowerPC's arch.h + - [44789e1e] PowerPC: convert ELFv2 conditionals from preprocessor to + plain conditions. + +It was taken from the fedoraproject pkgs ltrace repository: +ihttp://pkgs.fedoraproject.org/cgit/rpms/ltrace.git/commit/?id=fe527c31efcc51d1cdedd15269c2f807503099c5 + +The original changelog in fedora spec mentioned: + +""" +# Support for ppc64le, backported from upstream. 
+# http://anonscm.debian.org/gitweb/?p=collab-maint/ltrace.git;a=commit;h=eea4ad2cce289753aaa35b4e0258a76d8f8f367c +# https://bugzilla.redhat.com/show_bug.cgi?id=1125601 +Patch13: ltrace-0.7.91-ppc64le-support.patch +# 35a9677dc9dcb7909ebd28f30200474d7e8b660f, +# 437d2377119036346f4dbd93039c847b4cc9d0be, +# eb3993420734f091cde9a6053ca6b4edcf9ae334 +Patch14: ltrace-0.7.91-ppc64le-fixes.patch +""" + +This is Patch14; aka ltrace-0.7.91-ppc64le-fixes.patch + +It was refreshed (backported, if you will) for applying on 0.7.3 by: +Mathieu Trudel-Lapierre + +It is attributed to Petr Machata since there were no origin markings on the +original patch and he did the commit. If that's not correct, we can fix the +attribution when someone speaks up. + +--- + sysdeps/linux-gnu/ppc/arch.h | 41 +++++--- + sysdeps/linux-gnu/ppc/fetch.c | 197 +++++++++++++++++++----------------------- + 2 files changed, 118 insertions(+), 120 deletions(-) + +Index: b/sysdeps/linux-gnu/ppc/arch.h +=================================================================== +--- a/sysdeps/linux-gnu/ppc/arch.h ++++ b/sysdeps/linux-gnu/ppc/arch.h +@@ -32,36 +32,45 @@ + #define LT_ELF_MACHINE EM_PPC + + #ifdef __powerpc64__ // Says 'ltrace' is 64 bits, says nothing about target. +-#define LT_ELFCLASS2 ELFCLASS64 +-#define LT_ELF_MACHINE2 EM_PPC64 ++# define LT_ELFCLASS2 ELFCLASS64 ++# define LT_ELF_MACHINE2 EM_PPC64 + + # ifdef __LITTLE_ENDIAN__ +-# define BREAKPOINT_VALUE { 0x08, 0x00, 0xe0, 0x7f } +-# define ARCH_ENDIAN_LITTLE ++# define BREAKPOINT_VALUE { 0x08, 0x00, 0xe0, 0x7f } ++# define ARCH_ENDIAN_LITTLE + # else +-# define BREAKPOINT_VALUE { 0x7f, 0xe0, 0x00, 0x08 } +-# define ARCH_SUPPORTS_OPD +-# define ARCH_ENDIAN_BIG ++# define BREAKPOINT_VALUE { 0x7f, 0xe0, 0x00, 0x08 } ++# define ARCH_SUPPORTS_OPD ++# define ARCH_ENDIAN_BIG + # endif + +-# if _CALL_ELF != 2 +-# define ARCH_SUPPORTS_OPD +-# define STACK_FRAME_OVERHEAD 112 ++# if !defined(_CALL_ELF) || _CALL_ELF < 2 ++# define ARCH_SUPPORTS_OPD ++# define STACK_FRAME_OVERHEAD 112 + # ifndef EF_PPC64_ABI +-# define EF_PPC64_ABI 3 ++# define EF_PPC64_ABI 3 + # endif +-# else /* _CALL_ELF == 2 ABIv2 */ +-# define STACK_FRAME_OVERHEAD 32 ++# elif _CALL_ELF == 2 /* ELFv2 ABI */ ++# define STACK_FRAME_OVERHEAD 32 ++# else ++# error Unsupported PowerPC64 ABI. + # endif /* CALL_ELF */ + + #else +-#define BREAKPOINT_VALUE { 0x7f, 0xe0, 0x00, 0x08 } +-#define ARCH_ENDIAN_BIG ++# define STACK_FRAME_OVERHEAD 112 ++# define BREAKPOINT_VALUE { 0x7f, 0xe0, 0x00, 0x08 } ++# define ARCH_ENDIAN_BIG + # ifndef EF_PPC64_ABI +-# define EF_PPC64_ABI 3 ++# define EF_PPC64_ABI 3 + # endif + #endif /* __powerpc64__ */ + ++#ifdef _CALL_ELF ++enum { ppc64_call_elf_abi = _CALL_ELF }; ++#else ++enum { ppc64_call_elf_abi = 0 }; ++#endif ++ + #define ARCH_HAVE_ATOMIC_SINGLESTEP + #define ARCH_HAVE_ADD_PLT_ENTRY + #define ARCH_HAVE_TRANSLATE_ADDRESS +Index: b/sysdeps/linux-gnu/ppc/fetch.c +=================================================================== +--- a/sysdeps/linux-gnu/ppc/fetch.c ++++ b/sysdeps/linux-gnu/ppc/fetch.c +@@ -1,6 +1,6 @@ + /* + * This file is part of ltrace. +- * Copyright (C) 2012 Petr Machata, Red Hat Inc. ++ * Copyright (C) 2012, 2014 Petr Machata, Red Hat Inc. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + + #include "backend.h" + #include "fetch.h" +@@ -57,7 +58,7 @@ struct fetch_context { + arch_addr_t stack_pointer; + int greg; + int freg; +- int ret_struct; ++ bool ret_struct; + + union { + gregs32_t r32; +@@ -65,11 +66,29 @@ struct fetch_context { + } regs; + struct fpregs_t fpregs; + int vgreg; +- int struct_size; +- int struct_hfa_size; +- int struct_hfa_count; + }; + ++static bool ++is_eligible_hfa(struct arg_type_info *info, ++ struct arg_type_info **hfa_infop, size_t *hfa_countp) ++{ ++ size_t hfa_count; ++ struct arg_type_info *hfa_info = type_get_hfa_type(info, &hfa_count); ++ ++ if (hfa_info != NULL && hfa_count <= 8 ++ && (hfa_info->type == ARGTYPE_FLOAT ++ || hfa_info->type == ARGTYPE_DOUBLE)) { ++ ++ if (hfa_infop != NULL) ++ *hfa_infop = hfa_info; ++ if (hfa_countp != NULL) ++ *hfa_countp = hfa_count; ++ return true; ++ } ++ ++ return false; ++} ++ + static int + fetch_context_init(struct Process *proc, struct fetch_context *context) + { +@@ -125,30 +144,37 @@ arch_fetch_arg_init(enum tof type, struc + } + + context->vgreg = context->greg; +- context->struct_size = 0; +- context->struct_hfa_size = 0; +- context->struct_hfa_count = 0; + + /* Aggregates or unions of any length, and character strings + * of length longer than 8 bytes, will be returned in a + * storage buffer allocated by the caller. The caller will + * pass the address of this buffer as a hidden first argument + * in r3, causing the first explicit argument to be passed in +- * r4. */ +- context->ret_struct = ret_info->type == ARGTYPE_STRUCT; +- if (context->ret_struct) { +-#if _CALL_ELF == 2 +- /* if R3 points to stack, parameters will be in R4. */ +- uint64_t pstack_end = ptrace(PTRACE_PEEKTEXT, proc->pid, +- proc->stack_pointer, 0); +- if (((arch_addr_t)context->regs.r64[3] > proc->stack_pointer) +- && (context->regs.r64[3] < pstack_end)) { ++ * r4. ++ */ ++ ++ context->ret_struct = false; ++ ++ if (ppc64_call_elf_abi == 2) { ++ /* With ELFv2 ABI, aggregates that consist ++ * (recursively) only of members of the same ++ * floating-point or vector type, are passed in a ++ * series of floating-point resp. vector registers. ++ * Additionally, when returning any aggregate of up to ++ * 16 bytes, general-purpose registers are used. */ ++ ++ if (ret_info->type == ARGTYPE_STRUCT ++ && ! is_eligible_hfa(ret_info, NULL, NULL) ++ && type_sizeof(proc, ret_info) > 16) { ++ ++ context->ret_struct = true; + context->greg++; + context->stack_pointer += 8; + } +-#else ++ ++ } else if (ret_info->type == ARGTYPE_STRUCT) { ++ context->ret_struct = true; + context->greg++; +-#endif + } + + return context; +@@ -176,17 +202,16 @@ allocate_stack_slot(struct fetch_context + + size_t a = type_alignof(proc, info); + size_t off = 0; +- if (proc->e_machine == EM_PPC && a < 4) +- a = 4; +-#if _CALL_ELF == 2 +- else if (proc->e_machine == EM_PPC64 && sz == 4 && is_hfa_type) ++ if (proc->e_machine == EM_PPC && a < 4) { + a = 4; +- else +- a = 8; +-#else +- else if (proc->e_machine == EM_PPC64 && a < 8) +-#endif ++ } else if (ppc64_call_elf_abi == 2) { ++ if (proc->e_machine == EM_PPC64 && sz == 4 && is_hfa_type) { ++ a = 4; ++ } else ++ a = 8; ++ } else if (proc->e_machine == EM_PPC64 && a < 8) { + a = 8; ++ } + + /* XXX Remove the two double casts when arch_addr_t + * becomes integral type. 
*/ +@@ -259,18 +284,19 @@ allocate_gpr(struct fetch_context *ctx, + if (sz == (size_t)-1) + return -1; + assert(sz == 1 || sz == 2 || sz == 4 || sz == 8); +-#if _CALL_ELF == 2 +- /* Consume the stack slot corresponding to this arg. */ +- if ((sz + off) >= 8) +- ctx->greg++; + +- if (is_hfa_type) +- ctx->stack_pointer += sz; +- else +- ctx->stack_pointer += 8; +-#else +- ctx->greg++; +-#endif ++ if (ppc64_call_elf_abi == 2) { ++ /* Consume the stack slot corresponding to this arg. */ ++ if ((sz + off) >= 8) ++ ctx->greg++; ++ ++ if (is_hfa_type) ++ ctx->stack_pointer += sz; ++ else ++ ctx->stack_pointer += 8; ++ } else { ++ ctx->greg++; ++ } + + if (valuep == NULL) + return 0; +@@ -326,7 +352,6 @@ allocate_float(struct fetch_context *ctx + return allocate_stack_slot(ctx, proc, info, valuep, is_hfa_type); + } + +-#if _CALL_ELF == 2 + static int + allocate_hfa(struct fetch_context *ctx, struct Process *proc, + struct arg_type_info *info, struct value *valuep, +@@ -336,27 +361,27 @@ allocate_hfa(struct fetch_context *ctx, + if (sz == (size_t)-1) + return -1; + +- ctx->struct_hfa_size += sz; +- + /* There are two changes regarding structure return types: +- * * heterogeneous float/vector structs are returned +- * in (multiple) FP/vector registers, +- * instead of via implicit reference. +- * * small structs (up to 16 bytes) are return +- * in one or two GPRs, instead of via implicit reference. ++ * * heterogeneous float/vector structs are returned in ++ * (multiple) FP/vector registers, instead of via implicit ++ * reference. ++ * * small structs (up to 16 bytes) are return in one or two ++ * GPRs, instead of via implicit reference. + * + * Other structures (larger than 16 bytes, not heterogeneous) + * are still returned via implicit reference (i.e. a pointer + * to memory where to return the struct being passed in r3). +- * Of course, whether or not an implicit reference pointer +- * is present will shift the remaining arguments, +- * so you need to get this right for ELFv2 in order +- * to get the arguments correct. ++ * Of course, whether or not an implicit reference pointer is ++ * present will shift the remaining arguments, so you need to ++ * get this right for ELFv2 in order to get the arguments ++ * correct. ++ * + * If an actual parameter is known to correspond to an HFA + * formal parameter, each element is passed in the next + * available floating-point argument register starting at fp1 + * until the fp13. The remaining elements of the aggregate are +- * passed on the stack. */ ++ * passed on the stack. ++ */ + size_t slot_off = 0; + + unsigned char *buf = value_reserve(valuep, sz); +@@ -366,26 +391,17 @@ allocate_hfa(struct fetch_context *ctx, + struct arg_type_info *hfa_info = type_get_simple(hfa_type); + size_t hfa_sz = type_sizeof(proc, hfa_info); + +- if (hfa_count > 8) +- ctx->struct_hfa_count += hfa_count; +- + while (hfa_count > 0 && ctx->freg <= 13) { +- int rc; + struct value tmp; +- + value_init(&tmp, proc, NULL, hfa_info, 0); ++ int rc = allocate_float(ctx, proc, hfa_info, ++ &tmp, slot_off, true); ++ if (rc == 0) ++ memcpy(buf, value_get_data(&tmp, NULL), hfa_sz); ++ value_destroy(&tmp); + +- /* Hetereogeneous struct - get value on GPR or stack. 
*/ +- if (((hfa_type == ARGTYPE_FLOAT +- || hfa_type == ARGTYPE_DOUBLE) +- && hfa_count <= 8)) +- rc = allocate_float(ctx, proc, hfa_info, &tmp, +- slot_off, true); +- else +- rc = allocate_gpr(ctx, proc, hfa_info, &tmp, +- slot_off, true); +- +- memcpy(buf, value_get_data(&tmp, NULL), hfa_sz); ++ if (rc < 0) ++ return -1; + + slot_off += hfa_sz; + buf += hfa_sz; +@@ -394,17 +410,13 @@ allocate_hfa(struct fetch_context *ctx, + slot_off = 0; + ctx->vgreg++; + } +- +- value_destroy(&tmp); +- if (rc < 0) +- return -1; + } + if (hfa_count == 0) + return 0; + + /* if no remaining FP, GPR corresponding to slot is used +- * Mostly it is in part of r10. */ +- if (ctx->struct_hfa_size <= 64 && ctx->vgreg == 10) { ++ * Mostly it is in part of r10. */ ++ if (ctx->vgreg == 10) { + while (ctx->vgreg <= 10) { + struct value tmp; + value_init(&tmp, proc, NULL, hfa_info, 0); +@@ -428,11 +440,8 @@ allocate_hfa(struct fetch_context *ctx, + } + } + +- if (hfa_count == 0) +- return 0; +- + /* Remaining values are on stack */ +- while (hfa_count) { ++ while (hfa_count > 0) { + struct value tmp; + value_init(&tmp, proc, NULL, hfa_info, 0); + +@@ -444,7 +453,6 @@ allocate_hfa(struct fetch_context *ctx, + } + return 0; + } +-#endif + + static int + allocate_argument(struct fetch_context *ctx, struct Process *proc, +@@ -459,24 +467,20 @@ allocate_argument(struct fetch_context * + case ARGTYPE_FLOAT: + case ARGTYPE_DOUBLE: + return allocate_float(ctx, proc, info, valuep, +- 8 - type_sizeof(proc,info), false); ++ 8 - type_sizeof(proc,info), false); + + case ARGTYPE_STRUCT: + if (proc->e_machine == EM_PPC) { + if (value_pass_by_reference(valuep) < 0) + return -1; +- } else { +-#if _CALL_ELF == 2 ++ } else if (ppc64_call_elf_abi == 2) { + struct arg_type_info *hfa_info; +- size_t hfa_size; +- hfa_info = type_get_hfa_type(info, &hfa_size); +- if (hfa_info != NULL ) { +- size_t sz = type_sizeof(proc, info); +- ctx->struct_size += sz; ++ size_t hfa_count; ++ if (is_eligible_hfa(info, &hfa_info, &hfa_count)) { + return allocate_hfa(ctx, proc, info, valuep, +- hfa_info->type, hfa_size); ++ hfa_info->type, hfa_count); + } +-#endif ++ } else { + /* PPC64: Fixed size aggregates and unions passed by + * value are mapped to as many doublewords of the + * parameter save area as the value uses in memory. +@@ -510,9 +514,6 @@ allocate_argument(struct fetch_context * + if (sz == (size_t)-1) + return -1; + +- if (ctx->ret_struct) +- ctx->struct_size += sz; +- + size_t slots = (sz + width - 1) / width; /* Round up. 
*/ + unsigned char *buf = value_reserve(valuep, slots * width); + if (buf == NULL) +@@ -605,19 +606,7 @@ arch_fetch_retval(struct fetch_context * + if (fetch_context_init(proc, ctx) < 0) + return -1; + +-#if _CALL_ELF == 2 +- void *ptr = (void *)(ctx->regs.r64[1]+32); +- uint64_t val = ptrace(PTRACE_PEEKTEXT, proc->pid, ptr, 0); +- +- if (ctx->ret_struct +- && ((ctx->struct_size > 64 +- || ctx->struct_hfa_count > 8 +- || (ctx->struct_hfa_size == 0 && ctx->struct_size > 56) +- || (ctx->regs.r64[3] == ctx->regs.r64[1]+32) +- || (ctx->regs.r64[3] == val )))) { +-#else + if (ctx->ret_struct) { +-#endif + assert(info->type == ARGTYPE_STRUCT); + + uint64_t addr = read_gpr(ctx, proc, 3); diff -Nru ltrace-0.7.3/debian/patches/ppc64-unprelink.patch ltrace-0.7.3/debian/patches/ppc64-unprelink.patch --- ltrace-0.7.3/debian/patches/ppc64-unprelink.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/ppc64-unprelink.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,218 @@ +From a0093ca43cf40d7e5f6cebeb64156062d2de46d9 Mon Sep 17 00:00:00 2001 +From: Petr Machata +Date: Fri, 10 Jan 2014 20:06:51 +0100 +Subject: [PATCH 2/2] Don't crash untraced calls via PLT in prelinked PPC64 + binaries + +In prelinked binaries, ltrace has to unprelinks PLT slots in order to +catch calls done through PLT. This makes the calls done through these +slots invalid, because the special first PLT slot is not initialized, +and dynamic linker SIGSEGVs because of this. Ltrace relies on +arranging breakpoints such that the dynamic linker is not actually +entered, and moves PC around itself to simulate the effects of a call +through PLT. + +Originally, arch_elf_add_plt_entry was called only for symbols that +were actually traced. Later this was changed and it's now called for +all PLT entries, and the resulting candidate list is filtered +afterwards. This gives backends a chance to rename the symbol, as is +useful with IRELATIVE PLT calls, where symbol name may not be +available at all. But the PPC backend was never updated to reflect +this, and unresolved all symbols for which arch_elf_add_plt_entry was +called, thus rendering _all_ PLT slots invalid, even those that +weren't later procted by breakpoints. Thus calls done through any +untraced slots failed. + +This patch fixes this problem by deferring the unprelinking of PLT +slots into the on_install hook of breakpoints. +--- + sysdeps/linux-gnu/ppc/arch.h | 21 ++++++++- + sysdeps/linux-gnu/ppc/plt.c | 94 ++++++++++++++++++++++++++++++++++--------- + 2 files changed, 94 insertions(+), 21 deletions(-) + +Index: b/sysdeps/linux-gnu/ppc/arch.h +=================================================================== +--- a/sysdeps/linux-gnu/ppc/arch.h ++++ b/sysdeps/linux-gnu/ppc/arch.h +@@ -1,6 +1,6 @@ + /* + * This file is part of ltrace. +- * Copyright (C) 2012 Petr Machata ++ * Copyright (C) 2012,2013,2014 Petr Machata + * Copyright (C) 2006 Paul Gilliam + * Copyright (C) 2002,2004 Juan Cespedes + * +@@ -120,12 +120,29 @@ enum ppc64_plt_type { + /* Very similar to PPC_PLT_UNRESOLVED, but for JMP_IREL + * slots. */ + PPC_PLT_IRELATIVE, ++ ++ /* Transitional state before the breakpoint is enabled. */ ++ PPC_PLT_NEED_UNRESOLVE, + }; + + #define ARCH_HAVE_LIBRARY_SYMBOL_DATA ++struct ppc_unresolve_data; + struct arch_library_symbol_data { + enum ppc64_plt_type type; +- GElf_Addr resolved_value; ++ ++ /* State Contents ++ * ++ * PPC_DEFAULT N/A ++ * PPC64_PLT_STUB N/A ++ * PPC_PLT_UNRESOLVED PLT entry address. ++ * PPC_PLT_IRELATIVE Likewise. 
++ * PPC_PLT_RESOLVED The original value the slot was resolved to. ++ * PPC_PLT_NEED_UNRESOLVE DATA. ++ */ ++ union { ++ GElf_Addr resolved_value; ++ struct ppc_unresolve_data *data; ++ }; + + /* Address of corresponding slot in .plt. */ + GElf_Addr plt_slot_addr; +Index: b/sysdeps/linux-gnu/ppc/plt.c +=================================================================== +--- a/sysdeps/linux-gnu/ppc/plt.c ++++ b/sysdeps/linux-gnu/ppc/plt.c +@@ -715,6 +715,14 @@ unresolve_plt_slot(struct Process *proc, + return 0; + } + ++struct ppc_unresolve_data { ++ struct ppc_unresolve_data *self; /* A canary. */ ++ GElf_Addr plt_entry_addr; ++ GElf_Addr plt_slot_addr; ++ GElf_Addr plt_slot_value; ++ bool is_irelative; ++}; ++ + enum plt_status + arch_elf_add_plt_entry(struct Process *proc, struct ltelf *lte, + const char *a_name, GElf_Rela *rela, size_t ndx, +@@ -816,28 +824,23 @@ arch_elf_add_plt_entry(struct Process *p + && (plt_slot_value == plt_entry_addr || plt_slot_value == 0)) { + libsym->arch.type = PPC_PLT_UNRESOLVED; + libsym->arch.resolved_value = plt_entry_addr; +- + } else { +- /* Unresolve the .plt slot. If the binary was +- * prelinked, this makes the code invalid, because in +- * case of prelinked binary, the dynamic linker +- * doesn't update .plt[0] and .plt[1] with addresses +- * of the resover. But we don't care, we will never +- * need to enter the resolver. That just means that +- * we have to un-un-resolve this back before we +- * detach. */ +- +- if (unresolve_plt_slot(proc, plt_slot_addr, plt_entry_addr) < 0) { +- library_symbol_destroy(libsym); ++ /* Mark the symbol for later unresolving. We may not ++ * do this right away, as this is called by ltrace ++ * core for all symbols, and only later filtered. We ++ * only unresolve the symbol before the breakpoint is ++ * enabled. */ ++ ++ libsym->arch.type = PPC_PLT_NEED_UNRESOLVE; ++ libsym->arch.data = malloc(sizeof *libsym->arch.data); ++ if (libsym->arch.data == NULL) + goto fail; +- } + +- if (! is_irelative) { +- mark_as_resolved(libsym, plt_slot_value); +- } else { +- libsym->arch.type = PPC_PLT_IRELATIVE; +- libsym->arch.resolved_value = plt_entry_addr; +- } ++ libsym->arch.data->self = libsym->arch.data; ++ libsym->arch.data->plt_entry_addr = plt_entry_addr; ++ libsym->arch.data->plt_slot_addr = plt_slot_addr; ++ libsym->arch.data->plt_slot_value = plt_slot_value; ++ libsym->arch.data->is_irelative = is_irelative; + } + + *ret = libsym; +@@ -1041,6 +1044,7 @@ ppc_plt_bp_continue(struct breakpoint *b + return; + + case PPC64_PLT_STUB: ++ case PPC_PLT_NEED_UNRESOLVE: + /* These should never hit here. */ + break; + } +@@ -1107,6 +1111,52 @@ arch_library_clone(struct library *retp, + { + } + ++static void ++ppc_plt_bp_install(struct breakpoint *bp, struct Process *proc) ++{ ++ /* This should not be an artificial breakpoint. */ ++ struct library_symbol *libsym = bp->libsym; ++ if (libsym == NULL) ++ libsym = bp->arch.irel_libsym; ++ assert(libsym != NULL); ++ ++ if (libsym->arch.type == PPC_PLT_NEED_UNRESOLVE) { ++ /* Unresolve the .plt slot. If the binary was ++ * prelinked, this makes the code invalid, because in ++ * case of prelinked binary, the dynamic linker ++ * doesn't update .plt[0] and .plt[1] with addresses ++ * of the resover. But we don't care, we will never ++ * need to enter the resolver. That just means that ++ * we have to un-un-resolve this back before we ++ * detach. 
*/ ++ ++ struct ppc_unresolve_data *data = libsym->arch.data; ++ libsym->arch.data = NULL; ++ assert(data->self == data); ++ ++ GElf_Addr plt_slot_addr = data->plt_slot_addr; ++ GElf_Addr plt_slot_value = data->plt_slot_value; ++ GElf_Addr plt_entry_addr = data->plt_entry_addr; ++ ++ if (unresolve_plt_slot(proc, plt_slot_addr, ++ plt_entry_addr) == 0) { ++ if (! data->is_irelative) { ++ mark_as_resolved(libsym, plt_slot_value); ++ } else { ++ libsym->arch.type = PPC_PLT_IRELATIVE; ++ libsym->arch.resolved_value = plt_entry_addr; ++ } ++ } else { ++ fprintf(stderr, "Couldn't unresolve %s@%p. Not tracing" ++ " this symbol.\n", ++ breakpoint_name(bp), bp->addr); ++ proc_remove_breakpoint(proc, bp); ++ } ++ ++ free(data); ++ } ++} ++ + int + arch_library_symbol_init(struct library_symbol *libsym) + { +@@ -1124,6 +1174,11 @@ arch_library_symbol_init(struct library_ + void + arch_library_symbol_destroy(struct library_symbol *libsym) + { ++ if (libsym->arch.type == PPC_PLT_NEED_UNRESOLVE) { ++ assert(libsym->arch.data->self == libsym->arch.data); ++ free(libsym->arch.data); ++ libsym->arch.data = NULL; ++ } + } + + int +@@ -1159,6 +1214,7 @@ arch_breakpoint_init(struct Process *pro + static struct bp_callbacks cbs = { + .on_continue = ppc_plt_bp_continue, + .on_retract = ppc_plt_bp_retract, ++ .on_install = ppc_plt_bp_install, + }; + breakpoint_set_callbacks(bp, &cbs); + diff -Nru ltrace-0.7.3/debian/patches/ppc-bias.patch ltrace-0.7.3/debian/patches/ppc-bias.patch --- ltrace-0.7.3/debian/patches/ppc-bias.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/ppc-bias.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,134 @@ +From: Petr Machata +Subject: Fix bias handling in PPC backend +Last-Update: 2016-04-06 + +This is a backport of the following upstream commits: + - bf821009: Fix address biasing in PPC backend + - d80c5371: Fix cloning of PPC_PLT_NEED_UNRESOLVE breakpoints + - d8f1287b: Nits + +It was taken from the fedoraproject pkgs ltrace repository: +http://pkgs.fedoraproject.org/cgit/rpms/ltrace.git/commit/?id=5f8efb0257eaa772639d5a4912a6b5e3a709ceab + +The original changelog in fedora spec mentioned: + +""" +# https://bugzilla.redhat.com/show_bug.cgi?id=1171165 +# http://anonscm.debian.org/cgit/collab-maint/ltrace.git/commit/?id=d8f1287b85e2c2b2ae0235809e956f4365e53c45 +# http://anonscm.debian.org/cgit/collab-maint/ltrace.git/commit/?id=d80c5371454383e3f9978622e5578cf02af8c44c +# http://anonscm.debian.org/cgit/collab-maint/ltrace.git/commit/?id=bf82100966deda9c7d26ad085d97c08126a8ae88 +Patch16: ltrace-0.7.91-ppc-bias.patch + +[...] + + * Tue Dec 9 2014 Petr Machata - 0.7.91-11 + - Fix bias handling in PPC backend + - Fix cloning of unresolved breakpoints in PPC backend + (ltrace-0.7.91-ppc-bias.patch) + +""" + +This is Patch16; aka ltrace-0.7.91-ppc-bias.patch + +It was refreshed (backported, if you will) for applying on 0.7.3 by: +Mathieu Trudel-Lapierre + +It is attributed to Petr Machata since there were no origin markings on the +original patch and he did the commit. If that's not correct, we can fix the +attribution when someone speaks up. 
+ +--- + sysdeps/linux-gnu/ppc/plt.c | 36 +++++++++++++++++++++++------------- + 1 file changed, 23 insertions(+), 13 deletions(-) + +Index: b/sysdeps/linux-gnu/ppc/plt.c +=================================================================== +--- a/sysdeps/linux-gnu/ppc/plt.c ++++ b/sysdeps/linux-gnu/ppc/plt.c +@@ -309,14 +309,15 @@ arch_plt_sym_val(struct ltelf *lte, size + + assert(rela->r_addend != 0); + /* XXX double cast */ +- arch_addr_t res_addr = (arch_addr_t) (uintptr_t) rela->r_addend; ++ arch_addr_t res_addr ++ = (arch_addr_t) (uintptr_t) (rela->r_addend + lte->bias); + if (arch_translate_address(lte, res_addr, &res_addr) < 0) { + fprintf(stderr, "Couldn't OPD-translate IRELATIVE " + "resolver address.\n"); + return 0; + } + /* XXX double cast */ +- return (GElf_Addr) (uintptr_t) res_addr; ++ return (GElf_Addr) (uintptr_t) (res_addr - lte->bias); + + } else { + /* We put brakpoints to PLT entries the same as the +@@ -518,7 +519,7 @@ arch_elf_init(struct ltelf *lte, struct + #ifndef EF_PPC64_ABI + assert (! (lte->ehdr.e_flags & 3 ) == 2) + #else +- lte->arch.elfv2_abi=((lte->ehdr.e_flags & EF_PPC64_ABI) == 2) ; ++ lte->arch.elfv2_abi = ((lte->ehdr.e_flags & EF_PPC64_ABI) == 2); + #endif + + if (lte->ehdr.e_machine == EM_PPC64 +@@ -792,15 +793,15 @@ arch_elf_add_plt_entry(struct Process *p + assert(plt_slot_addr >= lte->plt_addr + || plt_slot_addr < lte->plt_addr + lte->plt_size); + ++ plt_entry_addr += lte->bias; ++ plt_slot_addr += lte->bias; ++ + /* Should avoid to do read if dynamic linker hasn't run yet + * or allow -1 a valid return code. */ + GElf_Addr plt_slot_value; +- if (read_plt_slot_value(proc, plt_slot_addr, &plt_slot_value) < 0) { +- if (!lte->arch.elfv2_abi) +- goto fail; +- else +- return PPC_PLT_UNRESOLVED; +- } ++ int rc = read_plt_slot_value(proc, plt_slot_addr, &plt_slot_value); ++ if (rc < 0 && !lte->arch.elfv2_abi) ++ goto fail; + + struct library_symbol *libsym = malloc(sizeof(*libsym)); + if (libsym == NULL) { +@@ -820,8 +821,9 @@ arch_elf_add_plt_entry(struct Process *p + goto fail; + libsym->arch.plt_slot_addr = plt_slot_addr; + +- if (! is_irelative +- && (plt_slot_value == plt_entry_addr || plt_slot_value == 0)) { ++ if (rc < 0 || (! is_irelative ++ && (plt_slot_value == plt_entry_addr ++ || plt_slot_value == 0))) { + libsym->arch.type = PPC_PLT_UNRESOLVED; + libsym->arch.resolved_value = plt_entry_addr; + } else { +@@ -1147,8 +1149,8 @@ ppc_plt_bp_install(struct breakpoint *bp + libsym->arch.resolved_value = plt_entry_addr; + } + } else { +- fprintf(stderr, "Couldn't unresolve %s@%p. Not tracing" +- " this symbol.\n", ++ fprintf(stderr, "Couldn't unresolve %s@%p. Will not" ++ " trace this symbol.\n", + breakpoint_name(bp), bp->addr); + proc_remove_breakpoint(proc, bp); + } +@@ -1186,6 +1188,14 @@ arch_library_symbol_clone(struct library + struct library_symbol *libsym) + { + retp->arch = libsym->arch; ++ if (libsym->arch.type == PPC_PLT_NEED_UNRESOLVE) { ++ assert(libsym->arch.data->self == libsym->arch.data); ++ retp->arch.data = malloc(sizeof *retp->arch.data); ++ if (retp->arch.data == NULL) ++ return -1; ++ *retp->arch.data = *libsym->arch.data; ++ retp->arch.data->self = retp->arch.data; ++ } + return 0; + } + diff -Nru ltrace-0.7.3/debian/patches/ptrace.diff ltrace-0.7.3/debian/patches/ptrace.diff --- ltrace-0.7.3/debian/patches/ptrace.diff 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/ptrace.diff 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,68 @@ +Description: try to make PTRACE scope sysctl more discoverable. 
+Updated: 2014-05-07 + +Index: ltrace-0.7.3/sysdeps/linux-gnu/trace.c +=================================================================== +--- ltrace-0.7.3.orig/sysdeps/linux-gnu/trace.c 2014-05-07 15:17:07.949872643 -0400 ++++ ltrace-0.7.3/sysdeps/linux-gnu/trace.c 2014-05-07 15:24:08.077866134 -0400 +@@ -49,7 +49,7 @@ + #include "type.h" + + void +-trace_fail_warning(pid_t pid) ++trace_fail_warning(pid_t pid, int err) + { + /* This was adapted from GDB. */ + #ifdef HAVE_LIBSELINUX +@@ -66,6 +66,11 @@ + "tracing other processes. You can disable this process attach protection by\n" + "issuing 'setsebool deny_ptrace=0' in the superuser context.\n"); + #endif /* HAVE_LIBSELINUX */ ++ if (err == EPERM) ++ fprintf(stderr, ++ "Could not attach to process. If your uid matches the uid of the target\n" ++ "process, check the setting of /proc/sys/kernel/yama/ptrace_scope, or try\n" ++ "again as the root user. For more details, see /etc/sysctl.d/10-ptrace.conf\n"); + } + + void +@@ -73,8 +78,9 @@ + { + debug(DEBUG_PROCESS, "trace_me: pid=%d", getpid()); + if (ptrace(PTRACE_TRACEME, 0, 0, 0) < 0) { ++ int errno_save = errno; + perror("PTRACE_TRACEME"); +- trace_fail_warning(getpid()); ++ trace_fail_warning(getpid(), errno_save); + exit(1); + } + } +Index: ltrace-0.7.3/backend.h +=================================================================== +--- ltrace-0.7.3.orig/backend.h 2014-05-07 15:03:16.000000000 -0400 ++++ ltrace-0.7.3/backend.h 2014-05-07 15:25:00.805865317 -0400 +@@ -150,7 +150,7 @@ + + /* Called when trace_me or primary trace_pid fail. This may plug in + * any platform-specific knowledge of why it could be so. */ +-void trace_fail_warning(pid_t pid); ++void trace_fail_warning(pid_t pid, int err); + + /* A pair of functions called to initiate a detachment request when + * ltrace is about to exit. Their job is to undo any effects that +Index: ltrace-0.7.3/proc.c +=================================================================== +--- ltrace-0.7.3.orig/proc.c 2014-05-07 15:03:16.000000000 -0400 ++++ ltrace-0.7.3/proc.c 2014-05-07 15:24:42.881865595 -0400 +@@ -496,9 +496,10 @@ + + /* First, see if we can attach the requested PID itself. 
*/ + if (open_one_pid(pid)) { ++ int errno_save = errno; + fprintf(stderr, "Cannot attach to pid %u: %s\n", + pid, strerror(errno)); +- trace_fail_warning(pid); ++ trace_fail_warning(pid, errno_save); + return; + } + diff -Nru ltrace-0.7.3/debian/patches/series ltrace-0.7.3/debian/patches/series --- ltrace-0.7.3/debian/patches/series 2016-09-21 13:15:35.000000000 +0200 +++ ltrace-0.7.3/debian/patches/series 2024-02-02 13:02:23.000000000 +0100 @@ -5,4 +5,36 @@ 05-sparc-ftbfs 06-unexpected-breakpoint gcc-5.diff +ptrace.diff +add_irelative_tracing_b420a226.patch +find_irelative_b061bae3.patch +keep_plt_reloc_in_vector_673ff510.patch +add_elf_read_u8_3c636fb7.patch +add_elf_read_next_u_439ab5bf.patch +add_elf_can_read_next_5c37171a.patch +add_elf_each_symbol_7a29f9e7.patch +elf_read_uleb128_184779e4.patch +arm_attr_decoding_df7d2311.patch +arm_fetch_backend_97a25160.patch +arm_bpc_62fc7747.patch +arm_vfp_params_1c8596d4.patch +arm_vararg_without_vfp_88a0fe50.patch +arm_backend_fixes_1383e5bd.patch +jmp_irel.patch +elf_load_dynamic_entry_4f2f66e6.patch +arm_plt_rel_9e33f5ac.patch +dont_ltelf_destroy_if_init_fails_0ba3c5ee.patch +ppc64el.diff +ppc64le-fixes.patch +ppc64-fork.patch +on_install_breakpoint_56134ff5.patch +ppc64-unprelink.patch +ppc-bias.patch +Move-get_hfa_type-from-IA64-backend-to-type.c-name-i.patch +Set-child-stack-alignment-in-trace-clone.c.patch +Implement-aarch64-support.patch +add-missing-stdint.h-include.patch +Add-missing-include-stdio.h.patch deprecated-readdir_r.diff +ppc64el-fix-ftbfs.diff +lp1992939-add-intel-cet-support.patch diff -Nru ltrace-0.7.3/debian/patches/Set-child-stack-alignment-in-trace-clone.c.patch ltrace-0.7.3/debian/patches/Set-child-stack-alignment-in-trace-clone.c.patch --- ltrace-0.7.3/debian/patches/Set-child-stack-alignment-in-trace-clone.c.patch 1970-01-01 01:00:00.000000000 +0100 +++ ltrace-0.7.3/debian/patches/Set-child-stack-alignment-in-trace-clone.c.patch 2024-02-02 12:39:11.000000000 +0100 @@ -0,0 +1,31 @@ +Author: Petr Machata +Description: Set child stack alignment in trace-clone.c + This is important on aarch64, which requires 16-byte aligned + stack pointer. This might be relevant on other arches as well, + I suspect we just happened to get the 16-byte boundary in some + cases. +Applied-Upstream: http://anonscm.debian.org/gitweb/?p=collab-maint/ltrace.git;a=commit;h=0b5457a9e59978bcd2eb5240f54838910365a93c +Last-Update: 2014-03-13 + +Index: ltrace/testsuite/ltrace.minor/trace-clone.c +=================================================================== +--- ltrace.orig/testsuite/ltrace.minor/trace-clone.c 2014-03-12 16:13:44.075726000 -0600 ++++ ltrace/testsuite/ltrace.minor/trace-clone.c 2014-03-12 16:16:14.593150571 -0600 +@@ -8,6 +8,7 @@ + #include + #include + #include ++#include + + int child () + { +@@ -22,7 +23,8 @@ + int main () + { + pid_t pid; +- static char stack[STACK_SIZE]; ++ static __attribute__ ((aligned (16))) char stack[STACK_SIZE]; ++ + #ifdef __ia64__ + pid = __clone2((myfunc)&child, stack, STACK_SIZE, CLONE_FS, NULL); + #else diff -Nru ltrace-0.7.3/debian/rules ltrace-0.7.3/debian/rules --- ltrace-0.7.3/debian/rules 2022-05-03 01:51:07.000000000 +0200 +++ ltrace-0.7.3/debian/rules 2024-02-02 13:13:08.000000000 +0100 @@ -1,5 +1,9 @@ #!/usr/bin/make -f +# The configure logic for this is backwards. We need this before checking +# for elfutils (ELF_C_READ_MMAP) +CFLAGS=-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE + %: dh $@
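
The address arithmetic that ppc-bias.patch corrects comes down to one rule: a PLT entry's or slot's link-time virtual address equals its runtime address only after adding the object's load bias, so the bias must be added before reading the traced process's memory and stripped again before comparing against link-time values. A minimal standalone sketch, not ltrace code, assuming only glibc's dl_iterate_phdr; it prints the load bias of every object mapped into the current process:

#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>

static int show_bias(struct dl_phdr_info *info, size_t size, void *data)
{
        (void)size;
        (void)data;
        /* dlpi_addr is the load bias: 0 for a non-PIE executable,
         * the mapping base for shared objects and PIEs. */
        printf("bias=0x%lx  %s\n", (unsigned long)info->dlpi_addr,
               info->dlpi_name[0] != '\0' ? info->dlpi_name : "(main program)");
        return 0;
}

int main(void)
{
        dl_iterate_phdr(show_bias, NULL);
        return 0;
}

For a non-PIE main executable the printed bias is 0; for shared objects it is their (typically randomized) load address, which is exactly the quantity the patch folds into plt_entry_addr and plt_slot_addr.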
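
ppc-bias.patch also teaches arch_library_symbol_clone to deep-copy the PPC_PLT_NEED_UNRESOLVE bookkeeping: the per-symbol struct stores a pointer to itself as a canary, so a shallow copy would leave the clone's canary pointing at the original and trip assert(data->self == data) later. A self-contained sketch of that pattern with simplified, made-up names (these are not ltrace's own types):

#include <assert.h>
#include <stdlib.h>

struct unresolve_data {
        struct unresolve_data *self;    /* Canary: must point at this object. */
        unsigned long plt_slot_addr;
        unsigned long plt_slot_value;
};

static struct unresolve_data *data_new(unsigned long addr, unsigned long value)
{
        struct unresolve_data *d = malloc(sizeof *d);
        if (d == NULL)
                return NULL;
        d->self = d;
        d->plt_slot_addr = addr;
        d->plt_slot_value = value;
        return d;
}

/* What a clone hook has to do: a plain struct assignment would leave the
 * canary pointing at the original and trip the assertion later. */
static struct unresolve_data *data_clone(const struct unresolve_data *src)
{
        assert(src->self == src);
        struct unresolve_data *d = malloc(sizeof *d);
        if (d == NULL)
                return NULL;
        *d = *src;
        d->self = d;
        return d;
}

int main(void)
{
        struct unresolve_data *a = data_new(0x1000, 0x2000);
        if (a == NULL)
                return 1;
        struct unresolve_data *b = data_clone(a);
        assert(b != NULL && b->self == b && b->plt_slot_addr == a->plt_slot_addr);
        free(a);
        free(b);
        return 0;
}

The canary check is cheap and catches both a forgotten deep copy and use of stale data after the owning symbol has been destroyed.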
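
ptrace.diff only prints a hint when an attach fails with EPERM; the policy it points at lives in the Yama sysctl. A rough standalone illustration, not the patch's code and with an invented helper name, of checking /proc/sys/kernel/yama/ptrace_scope after a failed PTRACE_ATTACH:

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void warn_ptrace_scope(pid_t pid, int err)
{
        if (err != EPERM)
                return;
        FILE *f = fopen("/proc/sys/kernel/yama/ptrace_scope", "r");
        if (f == NULL)
                return;         /* No Yama LSM; nothing to hint at. */
        int scope = 0;
        if (fscanf(f, "%d", &scope) == 1 && scope > 0)
                fprintf(stderr, "cannot attach to pid %d and ptrace_scope is %d;"
                        " see /etc/sysctl.d/10-ptrace.conf\n", (int)pid, scope);
        fclose(f);
}

int main(void)
{
        pid_t target = 1;       /* Attaching to init normally fails. */
        if (ptrace(PTRACE_ATTACH, target, NULL, NULL) < 0) {
                warn_ptrace_scope(target, errno);
        } else {
                waitpid(target, NULL, 0);
                ptrace(PTRACE_DETACH, target, NULL, NULL);
        }
        return 0;
}

Run as an unprivileged user with ptrace_scope set to 1 or higher, the attach fails with EPERM and the hint is printed; run as root it attaches, waits for the stop, and detaches cleanly.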
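
The trace-clone.c change is easiest to see outside the testsuite: clone() is handed the top of a static child stack, and aarch64 requires the stack pointer to stay 16-byte aligned, so the array itself is given 16-byte alignment. A standalone sketch under the same assumptions (glibc clone(), downward-growing stack); the constant and the trivial child function are illustrative, not taken from the test:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

#define STACK_SIZE (1024 * 1024)

static int child_fn(void *arg)
{
        (void)arg;
        return 0;
}

int main(void)
{
        /* Aligned as in the patch; glibc's clone() takes the top of the
         * stack, since the stack grows down on these architectures. */
        static __attribute__((aligned(16))) char stack[STACK_SIZE];
        pid_t pid = clone(child_fn, stack + STACK_SIZE, CLONE_FS | SIGCHLD, NULL);

        if (pid < 0) {
                perror("clone");
                return 1;
        }
        waitpid(pid, NULL, 0);
        return 0;
}

Because STACK_SIZE is a multiple of 16, aligning the base of the array keeps stack + STACK_SIZE, the initial stack pointer, aligned as well; without the attribute the placement of the static array merely happened to be suitably aligned on some builds.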